//===--- Lexer.cpp - C Language Family Lexer ------------------------------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file implements the Lexer and Token interfaces.
//
//===----------------------------------------------------------------------===//
//
// TODO: GCC Diagnostics emitted by the lexer:
// PEDWARN: (form feed|vertical tab) in preprocessing directive
//
// Universal characters, unicode, char mapping:
// WARNING: `%.*s' is not in NFKC
// WARNING: `%.*s' is not in NFC
//
// Other:
// TODO: Options to support:
//    -fexec-charset,-fwide-exec-charset
//
//===----------------------------------------------------------------------===//

#include "clang/Lex/Lexer.h"
#include "clang/Basic/CharInfo.h"
#include "clang/Basic/SourceManager.h"
#include "clang/Lex/CodeCompletionHandler.h"
#include "clang/Lex/LexDiagnostic.h"
#include "clang/Lex/LiteralSupport.h"
#include "clang/Lex/Preprocessor.h"
#include "llvm/ADT/STLExtras.h"
#include "llvm/ADT/StringExtras.h"
#include "llvm/ADT/StringSwitch.h"
#include "llvm/Support/Compiler.h"
#include "llvm/Support/ConvertUTF.h"
#include "llvm/Support/MemoryBuffer.h"
#include "UnicodeCharSets.h"
#include <cstring>
using namespace clang;

//===----------------------------------------------------------------------===//
// Token Class Implementation
//===----------------------------------------------------------------------===//

/// isObjCAtKeyword - Return true if we have an ObjC keyword identifier.
bool Token::isObjCAtKeyword(tok::ObjCKeywordKind objcKey) const {
  if (IdentifierInfo *II = getIdentifierInfo())
    return II->getObjCKeywordID() == objcKey;
  return false;
}

/// getObjCKeywordID - Return the ObjC keyword kind.
tok::ObjCKeywordKind Token::getObjCKeywordID() const {
  IdentifierInfo *specId = getIdentifierInfo();
  return specId ? specId->getObjCKeywordID() : tok::objc_not_keyword;
}
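
// Example (illustrative, not from the original file): callers typically pair
// these with a just-lexed '@' token, e.g.
//
//   if (Tok.isObjCAtKeyword(tok::objc_interface)) {
//     // handle "@interface ..."
//   }
//
// tok::objc_interface is a real ObjCKeywordKind; the surrounding control flow
// is only a sketch of a typical caller.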

//===----------------------------------------------------------------------===//
// Lexer Class Implementation
//===----------------------------------------------------------------------===//

void Lexer::anchor() { }

void Lexer::InitLexer(const char *BufStart, const char *BufPtr,
                      const char *BufEnd) {
  BufferStart = BufStart;
  BufferPtr = BufPtr;
  BufferEnd = BufEnd;

  assert(BufEnd[0] == 0 &&
         "We assume that the input buffer has a null character at the end"
         " to simplify lexing!");

  // Check whether we have a BOM in the beginning of the buffer. If yes - act
  // accordingly. Right now we support only UTF-8 with and without BOM, so, just
  // skip the UTF-8 BOM if it's present.
  if (BufferStart == BufferPtr) {
    // Determine the size of the BOM.
    StringRef Buf(BufferStart, BufferEnd - BufferStart);
    size_t BOMLength = llvm::StringSwitch<size_t>(Buf)
      .StartsWith("\xEF\xBB\xBF", 3) // UTF-8 BOM
      .Default(0);

    // Skip the BOM.
    BufferPtr += BOMLength;
  }

  Is_PragmaLexer = false;
  CurrentConflictMarkerState = CMK_None;

  // Start of the file is a start of line.
  IsAtStartOfLine = true;
  IsAtPhysicalStartOfLine = true;

  HasLeadingSpace = false;
  HasLeadingEmptyMacro = false;

  // We are not after parsing a #.
  ParsingPreprocessorDirective = false;

  // We are not after parsing #include.
  ParsingFilename = false;

  // We are not in raw mode. Raw mode disables diagnostics and interpretation
  // of tokens (e.g. identifiers, thus disabling macro expansion). It is used
  // to quickly lex the tokens of the buffer, e.g. when handling a "#if 0" block
  // or otherwise skipping over tokens.
  LexingRawMode = false;

  // Default to not keeping comments.
  ExtendedTokenMode = 0;
}

/// Lexer constructor - Create a new lexer object for the specified buffer
/// with the specified preprocessor managing the lexing process. This lexer
/// assumes that the associated file buffer and Preprocessor objects will
/// outlive it, so it doesn't take ownership of either of them.
Lexer::Lexer(FileID FID, const llvm::MemoryBuffer *InputFile, Preprocessor &PP)
  : PreprocessorLexer(&PP, FID),
    FileLoc(PP.getSourceManager().getLocForStartOfFile(FID)),
    LangOpts(PP.getLangOpts()) {
  InitLexer(InputFile->getBufferStart(), InputFile->getBufferStart(),
            InputFile->getBufferEnd());

  resetExtendedTokenMode();
}

void Lexer::resetExtendedTokenMode() {
  assert(PP && "Cannot reset token mode without a preprocessor");
  if (LangOpts.TraditionalCPP)
    SetKeepWhitespaceMode(true);
  else
    SetCommentRetentionState(PP->getCommentRetentionState());
}

/// Lexer constructor - Create a new raw lexer object. This object is only
/// suitable for calls to 'LexFromRawLexer'. This lexer assumes that the text
/// range will outlive it, so it doesn't take ownership of it.
Lexer::Lexer(SourceLocation fileloc, const LangOptions &langOpts,
             const char *BufStart, const char *BufPtr, const char *BufEnd)
  : FileLoc(fileloc), LangOpts(langOpts) {
  InitLexer(BufStart, BufPtr, BufEnd);

  // We *are* in raw mode.
  LexingRawMode = true;
}

/// Lexer constructor - Create a new raw lexer object. This object is only
/// suitable for calls to 'LexFromRawLexer'. This lexer assumes that the text
/// range will outlive it, so it doesn't take ownership of it.
Lexer::Lexer(FileID FID, const llvm::MemoryBuffer *FromFile,
             const SourceManager &SM, const LangOptions &langOpts)
  : FileLoc(SM.getLocForStartOfFile(FID)), LangOpts(langOpts) {
  InitLexer(FromFile->getBufferStart(), FromFile->getBufferStart(),
            FromFile->getBufferEnd());

  // We *are* in raw mode.
  LexingRawMode = true;
}
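
// Example (illustrative): raw-mode lexing with the constructor above. Raw
// mode performs no macro expansion or keyword lookup, so identifiers come
// back as tok::raw_identifier. Assumes FID, SM, and LangOpts are already set
// up by the caller.
//
//   Lexer RawLex(FID, SM.getBuffer(FID), SM, LangOpts);
//   Token Tok;
//   do {
//     RawLex.LexFromRawLexer(Tok);
//     // ... inspect Tok.getKind(), Tok.getLocation(), Tok.getLength() ...
//   } while (Tok.isNot(tok::eof));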

/// Create_PragmaLexer: Lexer constructor - Create a new lexer object for
/// _Pragma expansion. This has a variety of magic semantics that this method
/// sets up. It returns a new'd Lexer that must be delete'd when done.
///
/// On entrance to this routine, TokStartLoc is a macro location which has a
/// spelling loc that indicates the bytes to be lexed for the token and an
/// expansion location that indicates where all lexed tokens should be
/// "expanded from".
///
/// FIXME: It would really be nice to make _Pragma just be a wrapper around a
/// normal lexer that remaps tokens as they fly by. This would require making
/// Preprocessor::Lex virtual. Given that, we could just dump in a magic lexer
/// interface that could handle this stuff. This would pull GetMappedTokenLoc
/// out of the critical path of the lexer!
///
Lexer *Lexer::Create_PragmaLexer(SourceLocation SpellingLoc,
                                 SourceLocation ExpansionLocStart,
                                 SourceLocation ExpansionLocEnd,
                                 unsigned TokLen, Preprocessor &PP) {
  SourceManager &SM = PP.getSourceManager();

  // Create the lexer as if we were going to lex the file normally.
  FileID SpellingFID = SM.getFileID(SpellingLoc);
  const llvm::MemoryBuffer *InputFile = SM.getBuffer(SpellingFID);
  Lexer *L = new Lexer(SpellingFID, InputFile, PP);

  // Now that the lexer is created, change the start/end locations so that we
  // just lex the subsection of the file that we want. This is lexing from a
  // scratch buffer.
  const char *StrData = SM.getCharacterData(SpellingLoc);

  L->BufferPtr = StrData;
  L->BufferEnd = StrData+TokLen;
  assert(L->BufferEnd[0] == 0 && "Buffer is not nul terminated!");

  // Set the SourceLocation with the remapping information. This ensures that
  // GetMappedTokenLoc will remap the tokens as they are lexed.
  L->FileLoc = SM.createExpansionLoc(SM.getLocForStartOfFile(SpellingFID),
                                     ExpansionLocStart,
                                     ExpansionLocEnd, TokLen);

  // Ensure that the lexer thinks it is inside a directive, so that end \n will
  // return an EOD token.
  L->ParsingPreprocessorDirective = true;

  // This lexer really is for _Pragma.
  L->Is_PragmaLexer = true;
  return L;
}
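
// For orientation: per C99 6.10.9, a token sequence such as
//   _Pragma("pack(1)")
// behaves like the directive
//   #pragma pack(1)
// The lexer returned above lexes the destringized pragma text, while
// GetMappedTokenLoc (later in this file) makes every resulting token report
// an expansion location covering the original _Pragma(...) sequence.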

/// Stringify - Escape '\' and '"' (or '\'' when Charify is true) characters
/// in the specified string so the result can be embedded in a C string or
/// character literal. Note that this does not add the surrounding quotes
/// itself.
std::string Lexer::Stringify(const std::string &Str, bool Charify) {
  std::string Result = Str;
  char Quote = Charify ? '\'' : '"';
  for (unsigned i = 0, e = Result.size(); i != e; ++i) {
    if (Result[i] == '\\' || Result[i] == Quote) {
      Result.insert(Result.begin()+i, '\\');
      ++i; ++e;
    }
  }
  return Result;
}

/// Stringify - Convert the specified string into a C string by escaping '\'
/// and " characters. This does not add surrounding ""'s to the string.
void Lexer::Stringify(SmallVectorImpl<char> &Str) {
  for (unsigned i = 0, e = Str.size(); i != e; ++i) {
    if (Str[i] == '\\' || Str[i] == '"') {
      Str.insert(Str.begin()+i, '\\');
      ++i; ++e;
    }
  }
}
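
// Example (illustrative): given the five characters  a " b \ c  as input,
// Stringify inserts a backslash before each '"' and '\'; with Charify=true
// the single-quote is escaped instead:
//
//   Lexer::Stringify("a\"b\\c")    == "a\\\"b\\\\c"
//   Lexer::Stringify("it's", true) == "it\\'s"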

//===----------------------------------------------------------------------===//
// Token Spelling
//===----------------------------------------------------------------------===//

/// \brief Slow case of getSpelling. Extract the characters comprising the
/// spelling of this token from the provided input buffer.
static size_t getSpellingSlow(const Token &Tok, const char *BufPtr,
                              const LangOptions &LangOpts, char *Spelling) {
  assert(Tok.needsCleaning() && "getSpellingSlow called on simple token");

  size_t Length = 0;
  const char *BufEnd = BufPtr + Tok.getLength();

  if (Tok.is(tok::string_literal)) {
    // Munch the encoding-prefix and opening double-quote.
    while (BufPtr < BufEnd) {
      unsigned Size;
      Spelling[Length++] = Lexer::getCharAndSizeNoWarn(BufPtr, Size, LangOpts);
      BufPtr += Size;

      if (Spelling[Length - 1] == '"')
        break;
    }

    // Raw string literals need special handling; trigraph expansion and line
    // splicing do not occur within their d-char-sequence nor within their
    // r-char-sequence.
    if (Length >= 2 &&
        Spelling[Length - 2] == 'R' && Spelling[Length - 1] == '"') {
      // Search backwards from the end of the token to find the matching
      // closing quote.
      const char *RawEnd = BufEnd;
      do --RawEnd; while (*RawEnd != '"');
      size_t RawLength = RawEnd - BufPtr + 1;

      // Everything between the quotes is included verbatim in the spelling.
      memcpy(Spelling + Length, BufPtr, RawLength);
      Length += RawLength;
      BufPtr += RawLength;

      // The rest of the token is lexed normally.
    }
  }

  while (BufPtr < BufEnd) {
    unsigned Size;
    Spelling[Length++] = Lexer::getCharAndSizeNoWarn(BufPtr, Size, LangOpts);
    BufPtr += Size;
  }

  assert(Length < Tok.getLength() &&
         "NeedsCleaning flag set on token that didn't need cleaning!");
  return Length;
}
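
// Example (illustrative): a token "needs cleaning" when its on-disk bytes
// differ from its spelling. A keyword written as  whi\<newline>le  occupies
// 7 bytes in the file, but the cleaned spelling produced above is the 5
// characters "while".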

/// getSpelling() - Return the 'spelling' of this token. The spelling of a
/// token is the characters used to represent the token in the source file
/// after trigraph expansion and escaped-newline folding. In particular, this
/// wants to get the true, uncanonicalized, spelling of things like digraphs,
/// UCNs, etc.
StringRef Lexer::getSpelling(SourceLocation loc,
                             SmallVectorImpl<char> &buffer,
                             const SourceManager &SM,
                             const LangOptions &options,
                             bool *invalid) {
  // Break down the source location.
  std::pair<FileID, unsigned> locInfo = SM.getDecomposedLoc(loc);

  // Try to load the file buffer.
  bool invalidTemp = false;
  StringRef file = SM.getBufferData(locInfo.first, &invalidTemp);
  if (invalidTemp) {
    if (invalid) *invalid = true;
    return StringRef();
  }

  const char *tokenBegin = file.data() + locInfo.second;

  // Lex from the start of the given location.
  Lexer lexer(SM.getLocForStartOfFile(locInfo.first), options,
              file.begin(), tokenBegin, file.end());
  Token token;
  lexer.LexFromRawLexer(token);

  unsigned length = token.getLength();

  // Common case: no need for cleaning.
  if (!token.needsCleaning())
    return StringRef(tokenBegin, length);

  // Hard case, we need to relex the characters into the string.
  buffer.resize(length);
  buffer.resize(getSpellingSlow(token, tokenBegin, options, buffer.data()));
  return StringRef(buffer.data(), buffer.size());
}
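
// Example (illustrative) of the overload above; Loc names some token start
// location held by the caller:
//
//   SmallString<32> Scratch;
//   bool Invalid = false;
//   StringRef Sp = Lexer::getSpelling(Loc, Scratch, SM, LangOpts, &Invalid);
//
// Scratch is written only in the needs-cleaning case; otherwise the returned
// StringRef points directly into the source buffer.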

/// getSpelling() - Return the 'spelling' of this token. The spelling of a
/// token is the characters used to represent the token in the source file
/// after trigraph expansion and escaped-newline folding. In particular, this
/// wants to get the true, uncanonicalized, spelling of things like digraphs,
/// UCNs, etc.
std::string Lexer::getSpelling(const Token &Tok, const SourceManager &SourceMgr,
                               const LangOptions &LangOpts, bool *Invalid) {
  assert((int)Tok.getLength() >= 0 && "Token character range is bogus!");

  bool CharDataInvalid = false;
  const char *TokStart = SourceMgr.getCharacterData(Tok.getLocation(),
                                                    &CharDataInvalid);
  if (Invalid)
    *Invalid = CharDataInvalid;
  if (CharDataInvalid)
    return std::string();

  // If this token contains nothing interesting, return it directly.
  if (!Tok.needsCleaning())
    return std::string(TokStart, TokStart + Tok.getLength());

  std::string Result;
  Result.resize(Tok.getLength());
  Result.resize(getSpellingSlow(Tok, TokStart, LangOpts, &*Result.begin()));
  return Result;
}

/// getSpelling - This method is used to get the spelling of a token into a
/// preallocated buffer, instead of as an std::string. The caller is required
/// to allocate enough space for the token, which is guaranteed to be at least
/// Tok.getLength() bytes long. The actual length of the token is returned.
///
/// Note that this method may do two possible things: it may either fill in
/// the buffer specified with characters, or it may *change the input pointer*
/// to point to a constant buffer with the data already in it (avoiding a
/// copy). The caller is not allowed to modify the returned buffer pointer
/// if an internal buffer is returned.
unsigned Lexer::getSpelling(const Token &Tok, const char *&Buffer,
                            const SourceManager &SourceMgr,
                            const LangOptions &LangOpts, bool *Invalid) {
  assert((int)Tok.getLength() >= 0 && "Token character range is bogus!");

  const char *TokStart = 0;
  // NOTE: this has to be checked *before* testing for an IdentifierInfo.
  if (Tok.is(tok::raw_identifier))
    TokStart = Tok.getRawIdentifierData();
  else if (!Tok.hasUCN()) {
    if (const IdentifierInfo *II = Tok.getIdentifierInfo()) {
      // Just return the string from the identifier table, which is very quick.
      Buffer = II->getNameStart();
      return II->getLength();
    }
  }

  // NOTE: this can be checked even after testing for an IdentifierInfo.
  if (Tok.isLiteral())
    TokStart = Tok.getLiteralData();

  if (TokStart == 0) {
    // Compute the start of the token in the input lexer buffer.
    bool CharDataInvalid = false;
    TokStart = SourceMgr.getCharacterData(Tok.getLocation(), &CharDataInvalid);
    if (Invalid)
      *Invalid = CharDataInvalid;
    if (CharDataInvalid) {
      Buffer = "";
      return 0;
    }
  }

  // If this token contains nothing interesting, return it directly.
  if (!Tok.needsCleaning()) {
    Buffer = TokStart;
    return Tok.getLength();
  }

  // Otherwise, hard case, relex the characters into the string.
  return getSpellingSlow(Tok, TokStart, LangOpts, const_cast<char*>(Buffer));
}
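
// Example (illustrative) of the pointer-based overload above. Per the
// contract, Buffer must point at storage of at least Tok.getLength() bytes
// on entry; on return it may instead point at internal storage (identifier
// table, literal data, or the source buffer), which must not be modified:
//
//   SmallString<64> Storage;
//   Storage.resize(Tok.getLength());
//   const char *Ptr = Storage.data();
//   unsigned Len = Lexer::getSpelling(Tok, Ptr, SM, LangOpts);
//   StringRef Sp(Ptr, Len);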

/// MeasureTokenLength - Relex the token at the specified location and return
/// its length in bytes in the input file. If the token needs cleaning (e.g.
/// includes a trigraph or an escaped newline) then this count includes bytes
/// that are part of that.
unsigned Lexer::MeasureTokenLength(SourceLocation Loc,
                                   const SourceManager &SM,
                                   const LangOptions &LangOpts) {
  Token TheTok;
  if (getRawToken(Loc, TheTok, SM, LangOpts))
    return 0;
  return TheTok.getLength();
}

/// \brief Relex the token at the specified location.
/// \returns true if there was a failure, false on success.
bool Lexer::getRawToken(SourceLocation Loc, Token &Result,
                        const SourceManager &SM,
                        const LangOptions &LangOpts,
                        bool IgnoreWhiteSpace) {
  // TODO: this could be special cased for common tokens like identifiers, ')',
  // etc to make this faster, if it mattered. Just look at StrData[0] to handle
  // all obviously single-char tokens. This could use
  // Lexer::isObviouslySimpleCharacter for example to handle identifiers or
  // something.

  // If this comes from a macro expansion, we really do want the macro name,
  // not the token this macro expanded to.
  Loc = SM.getExpansionLoc(Loc);
  std::pair<FileID, unsigned> LocInfo = SM.getDecomposedLoc(Loc);
  bool Invalid = false;
  StringRef Buffer = SM.getBufferData(LocInfo.first, &Invalid);
  if (Invalid)
    return true;

  const char *StrData = Buffer.data()+LocInfo.second;

  if (!IgnoreWhiteSpace && isWhitespace(StrData[0]))
    return true;

  // Create a lexer starting at the beginning of this token.
  Lexer TheLexer(SM.getLocForStartOfFile(LocInfo.first), LangOpts,
                 Buffer.begin(), StrData, Buffer.end());
  TheLexer.SetCommentRetentionState(true);
  TheLexer.LexFromRawLexer(Result);
  return false;
}
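
// Example (illustrative): recovering the extent of a token from just its
// start location; Loc + Len is the first byte past the token:
//
//   unsigned Len = Lexer::MeasureTokenLength(Loc, SM, LangOpts);
//   SourceLocation TokEnd = Loc.getLocWithOffset(Len);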

static SourceLocation getBeginningOfFileToken(SourceLocation Loc,
                                              const SourceManager &SM,
                                              const LangOptions &LangOpts) {
  assert(Loc.isFileID());
  std::pair<FileID, unsigned> LocInfo = SM.getDecomposedLoc(Loc);
  if (LocInfo.first.isInvalid())
    return Loc;

  bool Invalid = false;
  StringRef Buffer = SM.getBufferData(LocInfo.first, &Invalid);
  if (Invalid)
    return Loc;

  // Back up from the current location until we hit the beginning of a line
  // (or the buffer). We'll relex from that point.
  const char *BufStart = Buffer.data();
  if (LocInfo.second >= Buffer.size())
    return Loc;

  const char *StrData = BufStart+LocInfo.second;
  if (StrData[0] == '\n' || StrData[0] == '\r')
    return Loc;

  const char *LexStart = StrData;
  while (LexStart != BufStart) {
    if (LexStart[0] == '\n' || LexStart[0] == '\r') {
      ++LexStart;
      break;
    }

    --LexStart;
  }

  // Create a lexer starting at the beginning of this token.
  SourceLocation LexerStartLoc = Loc.getLocWithOffset(-LocInfo.second);
  Lexer TheLexer(LexerStartLoc, LangOpts, BufStart, LexStart, Buffer.end());
  TheLexer.SetCommentRetentionState(true);

  // Lex tokens until we find the token that contains the source location.
  Token TheTok;
  do {
    TheLexer.LexFromRawLexer(TheTok);

    if (TheLexer.getBufferLocation() > StrData) {
      // Lexing this token has taken the lexer past the source location we're
      // looking for. If the current token encompasses our source location,
      // return the beginning of that token.
      if (TheLexer.getBufferLocation() - TheTok.getLength() <= StrData)
        return TheTok.getLocation();

      // We ended up skipping over the source location entirely, which means
      // that it points into whitespace. We're done here.
      break;
    }
  } while (TheTok.getKind() != tok::eof);

  // We've passed our source location; just return the original source
  // location.
  return Loc;
}

SourceLocation Lexer::GetBeginningOfToken(SourceLocation Loc,
                                          const SourceManager &SM,
                                          const LangOptions &LangOpts) {
  if (Loc.isFileID())
    return getBeginningOfFileToken(Loc, SM, LangOpts);

  if (!SM.isMacroArgExpansion(Loc))
    return Loc;

  SourceLocation FileLoc = SM.getSpellingLoc(Loc);
  SourceLocation BeginFileLoc = getBeginningOfFileToken(FileLoc, SM, LangOpts);
  std::pair<FileID, unsigned> FileLocInfo = SM.getDecomposedLoc(FileLoc);
  std::pair<FileID, unsigned> BeginFileLocInfo
    = SM.getDecomposedLoc(BeginFileLoc);
  assert(FileLocInfo.first == BeginFileLocInfo.first &&
         FileLocInfo.second >= BeginFileLocInfo.second);
  return Loc.getLocWithOffset(BeginFileLocInfo.second - FileLocInfo.second);
}
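
// Example (illustrative): if Loc points at the 'd' inside the identifier
// abcdef, GetBeginningOfToken backs up to the start of the line, relexes
// forward, and returns the location of the 'a'.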

namespace {
  enum PreambleDirectiveKind {
    PDK_Skipped,
    PDK_StartIf,
    PDK_EndIf,
    PDK_Unknown
  };
}

std::pair<unsigned, bool>
Lexer::ComputePreamble(const llvm::MemoryBuffer *Buffer,
                       const LangOptions &LangOpts, unsigned MaxLines) {
  // Create a lexer starting at the beginning of the file. Note that we use a
  // "fake" file source location at offset 1 so that the lexer will track our
  // position within the file.
  const unsigned StartOffset = 1;
  SourceLocation FileLoc = SourceLocation::getFromRawEncoding(StartOffset);
  Lexer TheLexer(FileLoc, LangOpts, Buffer->getBufferStart(),
                 Buffer->getBufferStart(), Buffer->getBufferEnd());
  TheLexer.SetCommentRetentionState(true);

  // StartLoc will differ from FileLoc if there is a BOM that was skipped.
  SourceLocation StartLoc = TheLexer.getSourceLocation();

  bool InPreprocessorDirective = false;
  Token TheTok;
  Token IfStartTok;
  unsigned IfCount = 0;
  SourceLocation ActiveCommentLoc;

  unsigned MaxLineOffset = 0;
  if (MaxLines) {
    const char *CurPtr = Buffer->getBufferStart();
    unsigned CurLine = 0;
    while (CurPtr != Buffer->getBufferEnd()) {
      char ch = *CurPtr++;
      if (ch == '\n') {
        ++CurLine;
        if (CurLine == MaxLines)
          break;
      }
    }
    if (CurPtr != Buffer->getBufferEnd())
      MaxLineOffset = CurPtr - Buffer->getBufferStart();
  }

  do {
    TheLexer.LexFromRawLexer(TheTok);

    if (InPreprocessorDirective) {
      // If we've hit the end of the file, we're done.
      if (TheTok.getKind() == tok::eof) {
        break;
      }

      // If we haven't hit the end of the preprocessor directive, skip this
      // token.
      if (!TheTok.isAtStartOfLine())
        continue;

      // We've passed the end of the preprocessor directive, and will look
      // at this token again below.
      InPreprocessorDirective = false;
    }

    // Keep track of the # of lines in the preamble.
    if (TheTok.isAtStartOfLine()) {
      unsigned TokOffset = TheTok.getLocation().getRawEncoding() - StartOffset;

      // If we were asked to limit the number of lines in the preamble,
      // and we're about to exceed that limit, we're done.
      if (MaxLineOffset && TokOffset >= MaxLineOffset)
        break;
    }

    // Comments are okay; skip over them.
    if (TheTok.getKind() == tok::comment) {
      if (ActiveCommentLoc.isInvalid())
        ActiveCommentLoc = TheTok.getLocation();
      continue;
    }

    if (TheTok.isAtStartOfLine() && TheTok.getKind() == tok::hash) {
      // This is the start of a preprocessor directive.
      Token HashTok = TheTok;
      InPreprocessorDirective = true;
      ActiveCommentLoc = SourceLocation();

      // Figure out which directive this is. Since we're lexing raw tokens,
      // we don't have an identifier table available. Instead, just look at
      // the raw identifier to recognize and categorize preprocessor
      // directives.
      TheLexer.LexFromRawLexer(TheTok);
      if (TheTok.getKind() == tok::raw_identifier && !TheTok.needsCleaning()) {
        StringRef Keyword(TheTok.getRawIdentifierData(),
                          TheTok.getLength());
        PreambleDirectiveKind PDK
          = llvm::StringSwitch<PreambleDirectiveKind>(Keyword)
              .Case("include", PDK_Skipped)
              .Case("__include_macros", PDK_Skipped)
              .Case("define", PDK_Skipped)
              .Case("undef", PDK_Skipped)
              .Case("line", PDK_Skipped)
              .Case("error", PDK_Skipped)
              .Case("pragma", PDK_Skipped)
              .Case("import", PDK_Skipped)
              .Case("include_next", PDK_Skipped)
              .Case("warning", PDK_Skipped)
              .Case("ident", PDK_Skipped)
              .Case("sccs", PDK_Skipped)
              .Case("assert", PDK_Skipped)
              .Case("unassert", PDK_Skipped)
              .Case("if", PDK_StartIf)
              .Case("ifdef", PDK_StartIf)
              .Case("ifndef", PDK_StartIf)
              .Case("elif", PDK_Skipped)
              .Case("else", PDK_Skipped)
              .Case("endif", PDK_EndIf)
              .Default(PDK_Unknown);

        switch (PDK) {
        case PDK_Skipped:
          continue;

        case PDK_StartIf:
          if (IfCount == 0)
            IfStartTok = HashTok;

          ++IfCount;
          continue;

        case PDK_EndIf:
          // Mismatched #endif. The preamble ends here.
          if (IfCount == 0)
            break;

          --IfCount;
          continue;

        case PDK_Unknown:
          // We don't know what this directive is; stop at the '#'.
          break;
        }
      }

      // We only end up here if we didn't recognize the preprocessor
      // directive or it was one that can't occur in the preamble at this
      // point. Roll back the current token to the location of the '#'.
      InPreprocessorDirective = false;
      TheTok = HashTok;
    }

    // We hit a token that we don't recognize as being in the
    // "preprocessing only" part of the file, so we're no longer in
    // the preamble.
    break;
  } while (true);

  SourceLocation End;
  if (IfCount)
    End = IfStartTok.getLocation();
  else if (ActiveCommentLoc.isValid())
    End = ActiveCommentLoc; // don't truncate a decl comment.
  else
    End = TheTok.getLocation();

  return std::make_pair(End.getRawEncoding() - StartLoc.getRawEncoding(),
                        IfCount ? IfStartTok.isAtStartOfLine()
                                : TheTok.isAtStartOfLine());
}
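
// Example (illustrative): ASTUnit-style use of ComputePreamble to decide how
// much of a main file can be shared across reparses:
//
//   std::pair<unsigned, bool> P = Lexer::ComputePreamble(Buffer, LangOpts);
//   // P.first:  number of bytes in the preamble
//   // P.second: whether the preamble ends at the start of a line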

/// AdvanceToTokenCharacter - Given a location that specifies the start of a
/// token, return a new location that specifies a character within the token.
SourceLocation Lexer::AdvanceToTokenCharacter(SourceLocation TokStart,
                                              unsigned CharNo,
                                              const SourceManager &SM,
                                              const LangOptions &LangOpts) {
  // Figure out how many physical characters away the specified expansion
  // character is. This needs to take into consideration newlines and
  // trigraphs.
  bool Invalid = false;
  const char *TokPtr = SM.getCharacterData(TokStart, &Invalid);

  // If they request the first char of the token, we're trivially done.
  if (Invalid || (CharNo == 0 && Lexer::isObviouslySimpleCharacter(*TokPtr)))
    return TokStart;

  unsigned PhysOffset = 0;

  // The usual case is that tokens don't contain anything interesting. Skip
  // over the uninteresting characters. If a token only consists of simple
  // chars, this method is extremely fast.
  while (Lexer::isObviouslySimpleCharacter(*TokPtr)) {
    if (CharNo == 0)
      return TokStart.getLocWithOffset(PhysOffset);
    ++TokPtr, --CharNo, ++PhysOffset;
  }

  // If we have a character that may be a trigraph or escaped newline, use a
  // lexer to parse it correctly.
  for (; CharNo; --CharNo) {
    unsigned Size;
    Lexer::getCharAndSizeNoWarn(TokPtr, Size, LangOpts);
    TokPtr += Size;
    PhysOffset += Size;
  }

  // Final detail: if we end up on an escaped newline, we want to return the
  // location of the actual byte of the token. For example foo\<newline>bar
  // advanced by 3 should return the location of b, not of \\. One compounding
  // detail of this is that the escape may be made by a trigraph.
  if (!Lexer::isObviouslySimpleCharacter(*TokPtr))
    PhysOffset += Lexer::SkipEscapedNewLines(TokPtr)-TokPtr;

  return TokStart.getLocWithOffset(PhysOffset);
}
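
// Example (illustrative), echoing the comment above: for a token written as
// foo\<newline>bar, AdvanceToTokenCharacter(TokStart, 3, SM, LangOpts) steps
// over the escaped newline and returns the location of the 'b'.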

/// \brief Computes the source location just past the end of the
/// token at this source location.
///
/// This routine can be used to produce a source location that
/// points just past the end of the token referenced by \p Loc, and
/// is generally used when a diagnostic needs to point just after a
/// token where it expected something different from what it received. If
/// the returned source location would not be meaningful (e.g., if
/// it points into a macro), this routine returns an invalid
/// source location.
///
/// \param Offset an offset from the end of the token, where the source
/// location should refer to. The default offset (0) produces a source
/// location pointing just past the end of the token; an offset of 1 produces
/// a source location pointing to the last character in the token, etc.
SourceLocation Lexer::getLocForEndOfToken(SourceLocation Loc, unsigned Offset,
                                          const SourceManager &SM,
                                          const LangOptions &LangOpts) {
  if (Loc.isInvalid())
    return SourceLocation();

  if (Loc.isMacroID()) {
    if (Offset > 0 || !isAtEndOfMacroExpansion(Loc, SM, LangOpts, &Loc))
      return SourceLocation(); // Points inside the macro expansion.
  }

  unsigned Len = Lexer::MeasureTokenLength(Loc, SM, LangOpts);
  if (Len > Offset)
    Len = Len - Offset;
  else
    return Loc;

  return Loc.getLocWithOffset(Len);
}
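
// Example (illustrative): diagnostics commonly use this to anchor an
// insertion just past a token, e.g. a missing ';' fix-it:
//
//   SourceLocation After = Lexer::getLocForEndOfToken(TokLoc, 0, SM, LangOpts);
//   if (After.isValid())
//     // attach FixItHint::CreateInsertion(After, ";") to the diagnostic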

/// \brief Returns true if the given MacroID location points at the first
/// token of the macro expansion.
bool Lexer::isAtStartOfMacroExpansion(SourceLocation loc,
                                      const SourceManager &SM,
                                      const LangOptions &LangOpts,
                                      SourceLocation *MacroBegin) {
  assert(loc.isValid() && loc.isMacroID() && "Expected a valid macro loc");

  SourceLocation expansionLoc;
  if (!SM.isAtStartOfImmediateMacroExpansion(loc, &expansionLoc))
    return false;

  if (expansionLoc.isFileID()) {
    // No other macro expansions, this is the first.
    if (MacroBegin)
      *MacroBegin = expansionLoc;
    return true;
  }

  return isAtStartOfMacroExpansion(expansionLoc, SM, LangOpts, MacroBegin);
}

/// \brief Returns true if the given MacroID location points at the last
/// token of the macro expansion.
bool Lexer::isAtEndOfMacroExpansion(SourceLocation loc,
                                    const SourceManager &SM,
                                    const LangOptions &LangOpts,
                                    SourceLocation *MacroEnd) {
  assert(loc.isValid() && loc.isMacroID() && "Expected a valid macro loc");

  SourceLocation spellLoc = SM.getSpellingLoc(loc);
  unsigned tokLen = MeasureTokenLength(spellLoc, SM, LangOpts);
  if (tokLen == 0)
    return false;

  SourceLocation afterLoc = loc.getLocWithOffset(tokLen);
  SourceLocation expansionLoc;
  if (!SM.isAtEndOfImmediateMacroExpansion(afterLoc, &expansionLoc))
    return false;

  if (expansionLoc.isFileID()) {
    // No other macro expansions.
    if (MacroEnd)
      *MacroEnd = expansionLoc;
    return true;
  }

  return isAtEndOfMacroExpansion(expansionLoc, SM, LangOpts, MacroEnd);
}
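
// Example (illustrative): given
//   #define FOO 1 + 2
// and a use of FOO, the expansion location of the '1' token satisfies
// isAtStartOfMacroExpansion and that of the '2' token satisfies
// isAtEndOfMacroExpansion; in both cases the returned MacroBegin/MacroEnd
// point at the FOO written in the source.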

static CharSourceRange makeRangeFromFileLocs(CharSourceRange Range,
                                             const SourceManager &SM,
                                             const LangOptions &LangOpts) {
  SourceLocation Begin = Range.getBegin();
  SourceLocation End = Range.getEnd();
  assert(Begin.isFileID() && End.isFileID());
  if (Range.isTokenRange()) {
    End = Lexer::getLocForEndOfToken(End, 0, SM, LangOpts);
    if (End.isInvalid())
      return CharSourceRange();
  }

  // Break down the source locations.
  FileID FID;
  unsigned BeginOffs;
  llvm::tie(FID, BeginOffs) = SM.getDecomposedLoc(Begin);
  if (FID.isInvalid())
    return CharSourceRange();

  unsigned EndOffs;
  if (!SM.isInFileID(End, FID, &EndOffs) ||
      BeginOffs > EndOffs)
    return CharSourceRange();

  return CharSourceRange::getCharRange(Begin, End);
}

CharSourceRange Lexer::makeFileCharRange(CharSourceRange Range,
                                         const SourceManager &SM,
                                         const LangOptions &LangOpts) {
  SourceLocation Begin = Range.getBegin();
  SourceLocation End = Range.getEnd();
  if (Begin.isInvalid() || End.isInvalid())
    return CharSourceRange();

  if (Begin.isFileID() && End.isFileID())
    return makeRangeFromFileLocs(Range, SM, LangOpts);

  if (Begin.isMacroID() && End.isFileID()) {
    if (!isAtStartOfMacroExpansion(Begin, SM, LangOpts, &Begin))
      return CharSourceRange();
    Range.setBegin(Begin);
    return makeRangeFromFileLocs(Range, SM, LangOpts);
  }

  if (Begin.isFileID() && End.isMacroID()) {
    if ((Range.isTokenRange() && !isAtEndOfMacroExpansion(End, SM, LangOpts,
                                                          &End)) ||
        (Range.isCharRange() && !isAtStartOfMacroExpansion(End, SM, LangOpts,
                                                           &End)))
      return CharSourceRange();
    Range.setEnd(End);
    return makeRangeFromFileLocs(Range, SM, LangOpts);
  }

  assert(Begin.isMacroID() && End.isMacroID());
  SourceLocation MacroBegin, MacroEnd;
  if (isAtStartOfMacroExpansion(Begin, SM, LangOpts, &MacroBegin) &&
      ((Range.isTokenRange() && isAtEndOfMacroExpansion(End, SM, LangOpts,
                                                        &MacroEnd)) ||
       (Range.isCharRange() && isAtStartOfMacroExpansion(End, SM, LangOpts,
                                                         &MacroEnd)))) {
    Range.setBegin(MacroBegin);
    Range.setEnd(MacroEnd);
    return makeRangeFromFileLocs(Range, SM, LangOpts);
  }

  bool Invalid = false;
  const SrcMgr::SLocEntry &BeginEntry = SM.getSLocEntry(SM.getFileID(Begin),
                                                        &Invalid);
  if (Invalid)
    return CharSourceRange();

  if (BeginEntry.getExpansion().isMacroArgExpansion()) {
    const SrcMgr::SLocEntry &EndEntry = SM.getSLocEntry(SM.getFileID(End),
                                                        &Invalid);
    if (Invalid)
      return CharSourceRange();

    if (EndEntry.getExpansion().isMacroArgExpansion() &&
        BeginEntry.getExpansion().getExpansionLocStart() ==
            EndEntry.getExpansion().getExpansionLocStart()) {
      Range.setBegin(SM.getImmediateSpellingLoc(Begin));
      Range.setEnd(SM.getImmediateSpellingLoc(End));
      return makeFileCharRange(Range, SM, LangOpts);
    }
  }

  return CharSourceRange();
}

StringRef Lexer::getSourceText(CharSourceRange Range,
                               const SourceManager &SM,
                               const LangOptions &LangOpts,
                               bool *Invalid) {
  Range = makeFileCharRange(Range, SM, LangOpts);
  if (Range.isInvalid()) {
    if (Invalid) *Invalid = true;
    return StringRef();
  }

  // Break down the source location.
  std::pair<FileID, unsigned> beginInfo = SM.getDecomposedLoc(Range.getBegin());
  if (beginInfo.first.isInvalid()) {
    if (Invalid) *Invalid = true;
    return StringRef();
  }

  unsigned EndOffs;
  if (!SM.isInFileID(Range.getEnd(), beginInfo.first, &EndOffs) ||
      beginInfo.second > EndOffs) {
    if (Invalid) *Invalid = true;
    return StringRef();
  }

  // Try to load the file buffer.
  bool invalidTemp = false;
  StringRef file = SM.getBufferData(beginInfo.first, &invalidTemp);
  if (invalidTemp) {
    if (Invalid) *Invalid = true;
    return StringRef();
  }

  if (Invalid) *Invalid = false;
  return file.substr(beginInfo.second, EndOffs - beginInfo.second);
}
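
// Example (illustrative): recovering the text a construct was written as;
// Node stands in for any AST node the caller has:
//
//   CharSourceRange R =
//       CharSourceRange::getTokenRange(Node->getSourceRange());
//   StringRef Text = Lexer::getSourceText(R, SM, LangOpts);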

StringRef Lexer::getImmediateMacroName(SourceLocation Loc,
                                       const SourceManager &SM,
                                       const LangOptions &LangOpts) {
  assert(Loc.isMacroID() && "Only reasonable to call this on macros");

  // Find the location of the immediate macro expansion.
  while (1) {
    FileID FID = SM.getFileID(Loc);
    const SrcMgr::SLocEntry *E = &SM.getSLocEntry(FID);
    const SrcMgr::ExpansionInfo &Expansion = E->getExpansion();
    Loc = Expansion.getExpansionLocStart();
    if (!Expansion.isMacroArgExpansion())
      break;

    // For macro arguments we need to check that the argument did not come
    // from an inner macro, e.g: "MAC1( MAC2(foo) )"

    // Loc points to the argument id of the macro definition; move to the
    // macro expansion.
    Loc = SM.getImmediateExpansionRange(Loc).first;
    SourceLocation SpellLoc = Expansion.getSpellingLoc();
    if (SpellLoc.isFileID())
      break; // No inner macro.

    // If the spelling location resides in the same FileID as the macro
    // expansion location, it means there is no inner macro.
    FileID MacroFID = SM.getFileID(Loc);
    if (SM.isInFileID(SpellLoc, MacroFID))
      break;

    // Argument came from inner macro.
    Loc = SpellLoc;
  }

  // Find the spelling location of the start of the non-argument expansion
  // range. This is where the macro name was spelled in order to begin
  // expanding this macro.
  Loc = SM.getSpellingLoc(Loc);

  // Dig out the buffer where the macro name was spelled and the extents of the
  // name so that we can render it into the expansion note.
  std::pair<FileID, unsigned> ExpansionInfo = SM.getDecomposedLoc(Loc);
  unsigned MacroTokenLength = Lexer::MeasureTokenLength(Loc, SM, LangOpts);
  StringRef ExpansionBuffer = SM.getBufferData(ExpansionInfo.first);
  return ExpansionBuffer.substr(ExpansionInfo.second, MacroTokenLength);
}
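
// Example (illustrative): with
//   #define INNER 0
//   #define OUTER INNER
// a token produced through a use of OUTER ultimately spells '0', but
// getImmediateMacroName on its location returns "INNER", the macro whose
// expansion directly produced the token.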

bool Lexer::isIdentifierBodyChar(char c, const LangOptions &LangOpts) {
  return isIdentifierBody(c, LangOpts.DollarIdents);
}

//===----------------------------------------------------------------------===//
// Diagnostics forwarding code.
//===----------------------------------------------------------------------===//

/// GetMappedTokenLoc - If lexing out of a 'mapped buffer', where we pretend
/// the lexer buffer was all expanded at a single point, perform the mapping.
/// This is currently only used for _Pragma implementation, so it is the slow
/// path of the hot getSourceLocation method. Do not allow it to be inlined.
static LLVM_ATTRIBUTE_NOINLINE SourceLocation GetMappedTokenLoc(
    Preprocessor &PP, SourceLocation FileLoc, unsigned CharNo, unsigned TokLen);
static SourceLocation GetMappedTokenLoc(Preprocessor &PP,
                                        SourceLocation FileLoc,
                                        unsigned CharNo, unsigned TokLen) {
  assert(FileLoc.isMacroID() && "Must be a macro expansion");

  // Otherwise, we're lexing "mapped tokens". This is used for things like
  // _Pragma handling. Combine the expansion location of FileLoc with the
  // spelling location.
  SourceManager &SM = PP.getSourceManager();

  // Create a new SLoc which is expanded from Expansion(FileLoc) but whose
  // characters come from spelling(FileLoc)+Offset.
  SourceLocation SpellingLoc = SM.getSpellingLoc(FileLoc);
  SpellingLoc = SpellingLoc.getLocWithOffset(CharNo);

  // Figure out the expansion loc range, which is the range covered by the
  // original _Pragma(...) sequence.
  std::pair<SourceLocation,SourceLocation> II =
    SM.getImmediateExpansionRange(FileLoc);

  return SM.createExpansionLoc(SpellingLoc, II.first, II.second, TokLen);
}

/// getSourceLocation - Return a source location identifier for the specified
/// offset in the current file.
SourceLocation Lexer::getSourceLocation(const char *Loc,
                                        unsigned TokLen) const {
  assert(Loc >= BufferStart && Loc <= BufferEnd &&
         "Location out of range for this buffer!");

  // In the normal case, we're just lexing from a simple file buffer, return
  // the file id from FileLoc with the offset specified.
  unsigned CharNo = Loc-BufferStart;
  if (FileLoc.isFileID())
    return FileLoc.getLocWithOffset(CharNo);

  // Otherwise, this is the _Pragma lexer case, which pretends that all of the
  // tokens are lexed from where the _Pragma was defined.
  assert(PP && "This doesn't work on raw lexers");
  return GetMappedTokenLoc(*PP, FileLoc, CharNo, TokLen);
}

/// Diag - Forwarding function for diagnostics.  This translates a source
/// position in the current buffer into a SourceLocation object for rendering.
DiagnosticBuilder Lexer::Diag(const char *Loc, unsigned DiagID) const {
  return PP->Diag(getSourceLocation(Loc), DiagID);
}

//===----------------------------------------------------------------------===//
// Trigraph and Escaped Newline Handling Code.
//===----------------------------------------------------------------------===//

/// GetTrigraphCharForLetter - Given a character that occurs after a ?? pair,
/// return the decoded trigraph letter it corresponds to, or '\0' if nothing.
static char GetTrigraphCharForLetter(char Letter) {
  switch (Letter) {
  default:   return 0;
  case '=':  return '#';
  case ')':  return ']';
  case '(':  return '[';
  case '!':  return '|';
  case '\'': return '^';
  case '>':  return '}';
  case '/':  return '\\';
  case '<':  return '{';
  case '-':  return '~';
  }
}
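
// For example, given the trigraph "??(" the letter after the "??" pair is
// '(' and the decode yields '['; an unlisted letter, as in "??x", yields 0,
// meaning the "??" pair is not a trigraph at all:
//
//   GetTrigraphCharForLetter('(')   // returns '['
//   GetTrigraphCharForLetter('/')   // returns '\\'
//   GetTrigraphCharForLetter('x')   // returns 0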

/// DecodeTrigraphChar - If the specified character is a legal trigraph when
/// prefixed with ??, emit a trigraph warning.  If trigraphs are enabled,
/// return the result character.  Finally, emit a warning about trigraph use
/// whether trigraphs are enabled or not.
static char DecodeTrigraphChar(const char *CP, Lexer *L) {
  char Res = GetTrigraphCharForLetter(*CP);
  if (!Res || !L) return Res;

  if (!L->getLangOpts().Trigraphs) {
    if (!L->isLexingRawMode())
      L->Diag(CP-2, diag::trigraph_ignored);
    return 0;
  }

  if (!L->isLexingRawMode())
    L->Diag(CP-2, diag::trigraph_converted) << StringRef(&Res, 1);
  return Res;
}

/// getEscapedNewLineSize - Return the size of the specified escaped newline,
/// or 0 if it is not an escaped newline. P[-1] is known to be a "\" or a
/// trigraph equivalent on entry to this function.
unsigned Lexer::getEscapedNewLineSize(const char *Ptr) {
  unsigned Size = 0;
  while (isWhitespace(Ptr[Size])) {
    ++Size;

    if (Ptr[Size-1] != '\n' && Ptr[Size-1] != '\r')
      continue;

    // If this is a \r\n or \n\r, skip the other half.
    if ((Ptr[Size] == '\r' || Ptr[Size] == '\n') &&
        Ptr[Size-1] != Ptr[Size])
      ++Size;

    return Size;
  }

  // Not an escaped newline, must be a \t or something else.
  return 0;
}
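
// Worked examples, with Ptr pointing at the character after the backslash:
//   backslash<newline>              -> returns 1
//   backslash<\r><\n>               -> returns 2 (both halves of the pair)
//   backslash<space><tab><newline>  -> returns 3 (whitespace is included)
//   backslash<space><letter>        -> returns 0 (not an escaped newline)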

/// SkipEscapedNewLines - If P points to an escaped newline (or a series of
/// them), skip over them and return the first non-escaped-newline found,
/// otherwise return P.
const char *Lexer::SkipEscapedNewLines(const char *P) {
  while (1) {
    const char *AfterEscape;
    if (*P == '\\') {
      AfterEscape = P+1;
    } else if (*P == '?') {
      // If not a trigraph for escape, bail out.
      if (P[1] != '?' || P[2] != '/')
        return P;
      AfterEscape = P+3;
    } else {
      return P;
    }

    unsigned NewLineSize = Lexer::getEscapedNewLineSize(AfterEscape);
    if (NewLineSize == 0) return P;
    P = AfterEscape+NewLineSize;
  }
}
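
// For instance, if P points at the first backslash of
// backslash<newline>backslash<newline>int, both escaped newlines are skipped
// and the returned pointer addresses the 'i'; if P points at a backslash
// followed by a non-whitespace character, P is returned unchanged.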

/// \brief Checks that the given token is the first token that occurs after the
/// given location (this excludes comments and whitespace). Returns the location
/// immediately after the specified token. If the token is not found or the
/// location is inside a macro, the returned source location will be invalid.
SourceLocation Lexer::findLocationAfterToken(SourceLocation Loc,
                                        tok::TokenKind TKind,
                                        const SourceManager &SM,
                                        const LangOptions &LangOpts,
                                        bool SkipTrailingWhitespaceAndNewLine) {
  if (Loc.isMacroID()) {
    if (!Lexer::isAtEndOfMacroExpansion(Loc, SM, LangOpts, &Loc))
      return SourceLocation();
  }
  Loc = Lexer::getLocForEndOfToken(Loc, 0, SM, LangOpts);

  // Break down the source location.
  std::pair<FileID, unsigned> LocInfo = SM.getDecomposedLoc(Loc);

  // Try to load the file buffer.
  bool InvalidTemp = false;
  StringRef File = SM.getBufferData(LocInfo.first, &InvalidTemp);
  if (InvalidTemp)
    return SourceLocation();

  const char *TokenBegin = File.data() + LocInfo.second;

  // Lex from the start of the given location.
  Lexer lexer(SM.getLocForStartOfFile(LocInfo.first), LangOpts, File.begin(),
              TokenBegin, File.end());
  // Find the token.
  Token Tok;
  lexer.LexFromRawLexer(Tok);
  if (Tok.isNot(TKind))
    return SourceLocation();
  SourceLocation TokenLoc = Tok.getLocation();

  // Calculate how much whitespace needs to be skipped if any.
  unsigned NumWhitespaceChars = 0;
  if (SkipTrailingWhitespaceAndNewLine) {
    const char *TokenEnd = SM.getCharacterData(TokenLoc) +
                           Tok.getLength();
    unsigned char C = *TokenEnd;
    while (isHorizontalWhitespace(C)) {
      C = *(++TokenEnd);
      NumWhitespaceChars++;
    }

    // Skip \r, \n, \r\n, or \n\r
    if (C == '\n' || C == '\r') {
      char PrevC = C;
      C = *(++TokenEnd);
      NumWhitespaceChars++;
      if ((C == '\n' || C == '\r') && C != PrevC)
        NumWhitespaceChars++;
    }
  }

  return TokenLoc.getLocWithOffset(Tok.getLength() + NumWhitespaceChars);
}
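
// A typical use, sketched here with illustrative names (E and Ctx are not
// part of this file): find the location just past the semicolon following
// an expression, skipping any trailing whitespace and the newline:
//
//   SourceLocation AfterSemi = Lexer::findLocationAfterToken(
//       E->getLocEnd(), tok::semi, Ctx.getSourceManager(),
//       Ctx.getLangOpts(), /*SkipTrailingWhitespaceAndNewLine=*/true);
//   // AfterSemi is invalid if the next token was not a ';' or if the
//   // location was inside a macro expansion.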

/// getCharAndSizeSlow - Peek a single 'character' from the specified buffer,
/// get its size, and return it.  This is tricky in several cases:
///   1. If currently at the start of a trigraph, we warn about the trigraph,
///      then either return the trigraph (skipping 3 chars) or the '?',
///      depending on whether trigraphs are enabled or not.
///   2. If this is an escaped newline (potentially with whitespace between
///      the backslash and newline), implicitly skip the newline and return
///      the char after it.
///
/// This handles the slow/uncommon case of the getCharAndSize method.  Here we
/// know that we can accumulate into Size, and that we have already incremented
/// Ptr by Size bytes.
///
/// NOTE: When this method is updated, getCharAndSizeSlowNoWarn (below) should
/// be updated to match.
///
char Lexer::getCharAndSizeSlow(const char *Ptr, unsigned &Size,
                               Token *Tok) {
  // If we have a slash, look for an escaped newline.
  if (Ptr[0] == '\\') {
    ++Size;
    ++Ptr;
Slash:
    // Common case, backslash-char where the char is not whitespace.
    if (!isWhitespace(Ptr[0])) return '\\';

    // See if we have optional whitespace characters between the slash and
    // newline.
    if (unsigned EscapedNewLineSize = getEscapedNewLineSize(Ptr)) {
      // Remember that this token needs to be cleaned.
      if (Tok) Tok->setFlag(Token::NeedsCleaning);

      // Warn if there was whitespace between the backslash and newline.
      if (Ptr[0] != '\n' && Ptr[0] != '\r' && Tok && !isLexingRawMode())
        Diag(Ptr, diag::backslash_newline_space);

      // Found backslash<whitespace><newline>.  Parse the char after it.
      Size += EscapedNewLineSize;
      Ptr  += EscapedNewLineSize;

      // If the char that we finally got was a \n, then we must have had
      // something like \<newline><newline>.  We don't want to consume the
      // second newline.
      if (*Ptr == '\n' || *Ptr == '\r' || *Ptr == '\0')
        return ' ';

      // Use slow version to accumulate a correct size field.
      return getCharAndSizeSlow(Ptr, Size, Tok);
    }

    // Otherwise, this is not an escaped newline, just return the slash.
    return '\\';
  }

  // If this is a trigraph, process it.
  if (Ptr[0] == '?' && Ptr[1] == '?') {
    // If this is actually a legal trigraph (not something like "??x"), emit
    // a trigraph warning.  If so, and if trigraphs are enabled, return it.
    if (char C = DecodeTrigraphChar(Ptr+2, Tok ? this : 0)) {
      // Remember that this token needs to be cleaned.
      if (Tok) Tok->setFlag(Token::NeedsCleaning);

      Ptr += 3;
      Size += 3;
      if (C == '\\') goto Slash;
      return C;
    }
  }

  // If this is neither, return a single character.
  ++Size;
  return *Ptr;
}
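
// Note how the Slash label lets the two slow cases compose: for the input
// "??/" followed by a newline and then '+', the "??/" trigraph decodes to
// '\\', which then participates in an escaped newline, so the caller
// observes the single character '+' with Size accumulated to 5, and (when a
// Token is provided) the token is marked as needing cleaning.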

/// getCharAndSizeSlowNoWarn - Handle the slow/uncommon case of the
/// getCharAndSizeNoWarn method.  Here we know that we can accumulate into Size,
/// and that we have already incremented Ptr by Size bytes.
///
/// NOTE: When this method is updated, getCharAndSizeSlow (above) should
/// be updated to match.
char Lexer::getCharAndSizeSlowNoWarn(const char *Ptr, unsigned &Size,
                                     const LangOptions &LangOpts) {
  // If we have a slash, look for an escaped newline.
  if (Ptr[0] == '\\') {
    ++Size;
    ++Ptr;
Slash:
    // Common case, backslash-char where the char is not whitespace.
    if (!isWhitespace(Ptr[0])) return '\\';

    // See if we have optional whitespace characters followed by a newline.
    if (unsigned EscapedNewLineSize = getEscapedNewLineSize(Ptr)) {
      // Found backslash<whitespace><newline>.  Parse the char after it.
      Size += EscapedNewLineSize;
      Ptr  += EscapedNewLineSize;

      // If the char that we finally got was a \n, then we must have had
      // something like \<newline><newline>.  We don't want to consume the
      // second newline.
      if (*Ptr == '\n' || *Ptr == '\r' || *Ptr == '\0')
        return ' ';

      // Use slow version to accumulate a correct size field.
      return getCharAndSizeSlowNoWarn(Ptr, Size, LangOpts);
    }

    // Otherwise, this is not an escaped newline, just return the slash.
    return '\\';
  }

  // If this is a trigraph, process it.
  if (LangOpts.Trigraphs && Ptr[0] == '?' && Ptr[1] == '?') {
    // If this is actually a legal trigraph (not something like "??x"), return
    // it.
    if (char C = GetTrigraphCharForLetter(Ptr[2])) {
      Ptr += 3;
      Size += 3;
      if (C == '\\') goto Slash;
      return C;
    }
  }

  // If this is neither, return a single character.
  ++Size;
  return *Ptr;
}

//===----------------------------------------------------------------------===//
// Helper methods for lexing.
//===----------------------------------------------------------------------===//

/// \brief Routine that indiscriminately skips bytes in the source file.
void Lexer::SkipBytes(unsigned Bytes, bool StartOfLine) {
  BufferPtr += Bytes;
  if (BufferPtr > BufferEnd)
    BufferPtr = BufferEnd;
  // FIXME: What exactly does the StartOfLine bit mean?  There are two
  // possible meanings for the "start" of the line: the first token on the
  // unexpanded line, or the first token on the expanded line.
  IsAtStartOfLine = StartOfLine;
  IsAtPhysicalStartOfLine = StartOfLine;
}

static bool isAllowedIDChar(uint32_t C, const LangOptions &LangOpts) {
  if (LangOpts.CPlusPlus11 || LangOpts.C11) {
    static const llvm::sys::UnicodeCharSet C11AllowedIDChars(
        C11AllowedIDCharRanges);
    return C11AllowedIDChars.contains(C);
  } else if (LangOpts.CPlusPlus) {
    static const llvm::sys::UnicodeCharSet CXX03AllowedIDChars(
        CXX03AllowedIDCharRanges);
    return CXX03AllowedIDChars.contains(C);
  } else {
    static const llvm::sys::UnicodeCharSet C99AllowedIDChars(
        C99AllowedIDCharRanges);
    return C99AllowedIDChars.contains(C);
  }
}
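
// For example, U+00E9 (LATIN SMALL LETTER E WITH ACUTE) is accepted as an
// identifier character under all three sets of rules, but the C99, C++03,
// and C11 tables included from UnicodeCharSets.h differ in which other
// non-ASCII characters they allow.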

static bool isAllowedInitiallyIDChar(uint32_t C, const LangOptions &LangOpts) {
  assert(isAllowedIDChar(C, LangOpts));
  if (LangOpts.CPlusPlus11 || LangOpts.C11) {
    static const llvm::sys::UnicodeCharSet C11DisallowedInitialIDChars(
        C11DisallowedInitialIDCharRanges);
    return !C11DisallowedInitialIDChars.contains(C);
  } else if (LangOpts.CPlusPlus) {
    return true;
  } else {
    static const llvm::sys::UnicodeCharSet C99DisallowedInitialIDChars(
        C99DisallowedInitialIDCharRanges);
    return !C99DisallowedInitialIDChars.contains(C);
  }
}

static inline CharSourceRange makeCharRange(Lexer &L, const char *Begin,
                                            const char *End) {
  return CharSourceRange::getCharRange(L.getSourceLocation(Begin),
                                       L.getSourceLocation(End));
}

static void maybeDiagnoseIDCharCompat(DiagnosticsEngine &Diags, uint32_t C,
                                      CharSourceRange Range, bool IsFirst) {
  // Check C99 compatibility.
  if (Diags.getDiagnosticLevel(diag::warn_c99_compat_unicode_id,
                               Range.getBegin()) > DiagnosticsEngine::Ignored) {
    enum {
      CannotAppearInIdentifier = 0,
      CannotStartIdentifier
    };

    static const llvm::sys::UnicodeCharSet C99AllowedIDChars(
        C99AllowedIDCharRanges);
    static const llvm::sys::UnicodeCharSet C99DisallowedInitialIDChars(
        C99DisallowedInitialIDCharRanges);
    if (!C99AllowedIDChars.contains(C)) {
      Diags.Report(Range.getBegin(), diag::warn_c99_compat_unicode_id)
        << Range
        << CannotAppearInIdentifier;
    } else if (IsFirst && C99DisallowedInitialIDChars.contains(C)) {
      Diags.Report(Range.getBegin(), diag::warn_c99_compat_unicode_id)
        << Range
        << CannotStartIdentifier;
    }
  }

  // Check C++98 compatibility.
  if (Diags.getDiagnosticLevel(diag::warn_cxx98_compat_unicode_id,
                               Range.getBegin()) > DiagnosticsEngine::Ignored) {
    static const llvm::sys::UnicodeCharSet CXX03AllowedIDChars(
        CXX03AllowedIDCharRanges);
    if (!CXX03AllowedIDChars.contains(C)) {
      Diags.Report(Range.getBegin(), diag::warn_cxx98_compat_unicode_id)
        << Range;
    }
  }
}

bool Lexer::LexIdentifier(Token &Result, const char *CurPtr) {
  // Match [_A-Za-z0-9]*, we have already matched [_A-Za-z$]
  unsigned Size;
  unsigned char C = *CurPtr++;
  while (isIdentifierBody(C))
    C = *CurPtr++;

  --CurPtr;   // Back up over the skipped character.

  // Fast path, no $,\,? in identifier found.  '\' might be an escaped newline
  // or UCN, and ? might be a trigraph for '\', an escaped newline or UCN.
  //
  // TODO: Could merge these checks into an InfoTable flag to make the
  // comparison cheaper
  if (isASCII(C) && C != '\\' && C != '?' &&
      (C != '$' || !LangOpts.DollarIdents)) {
FinishIdentifier:
    const char *IdStart = BufferPtr;
    FormTokenWithChars(Result, CurPtr, tok::raw_identifier);
    Result.setRawIdentifierData(IdStart);

    // If we are in raw mode, return this identifier raw.  There is no need to
    // look up identifier information or attempt to macro expand it.
    if (LexingRawMode)
      return true;

    // Fill in Result.IdentifierInfo and update the token kind,
    // looking up the identifier in the identifier table.
    IdentifierInfo *II = PP->LookUpIdentifierInfo(Result);

    // Finally, now that we know we have an identifier, pass this off to the
    // preprocessor, which may macro expand it or something.
    if (II->isHandleIdentifierCase())
      return PP->HandleIdentifier(Result);

    return true;
  }

  // Otherwise, $,\,? in identifier found.  Enter slower path.

  C = getCharAndSize(CurPtr, Size);
  while (1) {
    if (C == '$') {
      // If we hit a $ and they are not supported in identifiers, we are done.
      if (!LangOpts.DollarIdents) goto FinishIdentifier;

      // Otherwise, emit a diagnostic and continue.
      if (!isLexingRawMode())
        Diag(CurPtr, diag::ext_dollar_in_identifier);
      CurPtr = ConsumeChar(CurPtr, Size, Result);
      C = getCharAndSize(CurPtr, Size);
      continue;

    } else if (C == '\\') {
      const char *UCNPtr = CurPtr + Size;
      uint32_t CodePoint = tryReadUCN(UCNPtr, CurPtr, /*Token=*/0);
      if (CodePoint == 0 || !isAllowedIDChar(CodePoint, LangOpts))
        goto FinishIdentifier;

      if (!isLexingRawMode()) {
        maybeDiagnoseIDCharCompat(PP->getDiagnostics(), CodePoint,
                                  makeCharRange(*this, CurPtr, UCNPtr),
                                  /*IsFirst=*/false);
      }

      Result.setFlag(Token::HasUCN);
      if ((UCNPtr - CurPtr == 6 && CurPtr[1] == 'u') ||
          (UCNPtr - CurPtr == 10 && CurPtr[1] == 'U'))
        CurPtr = UCNPtr;
      else
        while (CurPtr != UCNPtr)
          (void)getAndAdvanceChar(CurPtr, Result);

      C = getCharAndSize(CurPtr, Size);
      continue;
    } else if (!isASCII(C)) {
      const char *UnicodePtr = CurPtr;
      UTF32 CodePoint;
      ConversionResult Result =
          llvm::convertUTF8Sequence((const UTF8 **)&UnicodePtr,
                                    (const UTF8 *)BufferEnd,
                                    &CodePoint,
                                    strictConversion);
      if (Result != conversionOK ||
          !isAllowedIDChar(static_cast<uint32_t>(CodePoint), LangOpts))
        goto FinishIdentifier;

      if (!isLexingRawMode()) {
        maybeDiagnoseIDCharCompat(PP->getDiagnostics(), CodePoint,
                                  makeCharRange(*this, CurPtr, UnicodePtr),
                                  /*IsFirst=*/false);
      }

      CurPtr = UnicodePtr;
      C = getCharAndSize(CurPtr, Size);
      continue;
    } else if (!isIdentifierBody(C)) {
      goto FinishIdentifier;
    }

    // Otherwise, this character is good, consume it.
    CurPtr = ConsumeChar(CurPtr, Size, Result);

    C = getCharAndSize(CurPtr, Size);
    while (isIdentifierBody(C)) {
      CurPtr = ConsumeChar(CurPtr, Size, Result);
      C = getCharAndSize(CurPtr, Size);
    }
  }
}

/// isHexaLiteral - Return true if Start points to a hex constant, even in
/// Microsoft mode (where this is supposed to be several different tokens).
bool Lexer::isHexaLiteral(const char *Start, const LangOptions &LangOpts) {
  unsigned Size;
  char C1 = Lexer::getCharAndSizeNoWarn(Start, Size, LangOpts);
  if (C1 != '0')
    return false;
  char C2 = Lexer::getCharAndSizeNoWarn(Start + Size, Size, LangOpts);
  return (C2 == 'x' || C2 == 'X');
}
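
// For example (Opts stands for any LangOptions value):
//   isHexaLiteral("0x123", Opts)  // true
//   isHexaLiteral("0X1p3", Opts)  // true
//   isHexaLiteral("0123", Opts)   // false (octal, not hex)
// Because it reads through getCharAndSizeNoWarn, an escaped newline or a
// trigraph between the '0' and the 'x' does not defeat the check.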

/// LexNumericConstant - Lex the remainder of an integer or floating point
/// constant. From[-1] is the first character lexed.  Return the end of the
/// constant.
bool Lexer::LexNumericConstant(Token &Result, const char *CurPtr) {
  unsigned Size;
  char C = getCharAndSize(CurPtr, Size);
  char PrevCh = 0;
  while (isPreprocessingNumberBody(C)) { // FIXME: UCNs in ud-suffix.
    CurPtr = ConsumeChar(CurPtr, Size, Result);
    PrevCh = C;
    C = getCharAndSize(CurPtr, Size);
  }

  // If we fell out, check for a sign, due to 1e+12.  If we have one, continue.
  if ((C == '-' || C == '+') && (PrevCh == 'E' || PrevCh == 'e')) {
    // If we are in Microsoft mode, don't continue if the constant is hex.
    // For example, MSVC will accept the following as 3 tokens: 0x1234567e+1
    if (!LangOpts.MicrosoftExt || !isHexaLiteral(BufferPtr, LangOpts))
      return LexNumericConstant(Result, ConsumeChar(CurPtr, Size, Result));
  }

  // If we have a hex FP constant, continue.
  if ((C == '-' || C == '+') && (PrevCh == 'P' || PrevCh == 'p')) {
    // Outside C99, we accept hexadecimal floating point numbers as a
    // not-quite-conforming extension. Only do so if this looks like it's
    // actually meant to be a hexfloat, and not if it has a ud-suffix.
    bool IsHexFloat = true;
    if (!LangOpts.C99) {
      if (!isHexaLiteral(BufferPtr, LangOpts))
        IsHexFloat = false;
      else if (std::find(BufferPtr, CurPtr, '_') != CurPtr)
        IsHexFloat = false;
    }
    if (IsHexFloat)
      return LexNumericConstant(Result, ConsumeChar(CurPtr, Size, Result));
  }

  // If we have a digit separator, continue.
  if (C == '\'' && getLangOpts().CPlusPlus1y) {
    unsigned NextSize;
    char Next = getCharAndSizeNoWarn(CurPtr + Size, NextSize, getLangOpts());
    if (isIdentifierBody(Next)) {
      if (!isLexingRawMode())
        Diag(CurPtr, diag::warn_cxx11_compat_digit_separator);
      CurPtr = ConsumeChar(CurPtr, Size, Result);
      return LexNumericConstant(Result, CurPtr);
    }
  }

  // Update the location of token as well as BufferPtr.
  const char *TokStart = BufferPtr;
  FormTokenWithChars(Result, CurPtr, tok::numeric_constant);
  Result.setLiteralData(TokStart);
  return true;
}
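
// Taken together, the continuation rules above mean that each of the
// following lexes as a single numeric constant token:
//
//   1e+12       // exponent sign after 'e'
//   0x1.8p-3    // hex float: sign after 'p'
//   1'000'000   // C++1y digit separators
//
// while in Microsoft mode "0x1234567e+1" stops after the 'e' and lexes as
// three tokens, matching MSVC.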

/// LexUDSuffix - Lex the ud-suffix production for user-defined literal suffixes
/// in C++11, or warn on a ud-suffix in C++98.
const char *Lexer::LexUDSuffix(Token &Result, const char *CurPtr,
                               bool IsStringLiteral) {
  assert(getLangOpts().CPlusPlus);

  // Maximally munch an identifier. FIXME: UCNs.
  unsigned Size;
  char C = getCharAndSize(CurPtr, Size);
  if (isIdentifierHead(C)) {
    if (!getLangOpts().CPlusPlus11) {
      if (!isLexingRawMode())
        Diag(CurPtr,
             C == '_' ? diag::warn_cxx11_compat_user_defined_literal
                      : diag::warn_cxx11_compat_reserved_user_defined_literal)
          << FixItHint::CreateInsertion(getSourceLocation(CurPtr), " ");
      return CurPtr;
    }

    // C++11 [lex.ext]p10, [usrlit.suffix]p1: A program containing a ud-suffix
    // that does not start with an underscore is ill-formed. As a conforming
    // extension, we treat all such suffixes as if they had whitespace before
    // them.
    bool IsUDSuffix = false;
    if (C == '_')
      IsUDSuffix = true;
    else if (IsStringLiteral && getLangOpts().CPlusPlus1y) {
      // In C++1y, we need to look ahead a few characters to see if this is a
      // valid suffix for a string literal or a numeric literal (this could be
      // the 'operator""if' defining a numeric literal operator).
      const unsigned MaxStandardSuffixLength = 3;
      char Buffer[MaxStandardSuffixLength] = { C };
      unsigned Consumed = Size;
      unsigned Chars = 1;
      while (true) {
        unsigned NextSize;
        char Next = getCharAndSizeNoWarn(CurPtr + Consumed, NextSize,
                                         getLangOpts());
        if (!isIdentifierBody(Next)) {
          // End of suffix. Check whether this is on the whitelist.
          IsUDSuffix = (Chars == 1 && Buffer[0] == 's') ||
                       NumericLiteralParser::isValidUDSuffix(
                           getLangOpts(), StringRef(Buffer, Chars));
          break;
        }

        if (Chars == MaxStandardSuffixLength)
          // Too long: can't be a standard suffix.
          break;

        Buffer[Chars++] = Next;
        Consumed += NextSize;
      }
    }

    if (!IsUDSuffix) {
      if (!isLexingRawMode())
        Diag(CurPtr, getLangOpts().MicrosoftMode ?
            diag::ext_ms_reserved_user_defined_literal :
            diag::ext_reserved_user_defined_literal)
          << FixItHint::CreateInsertion(getSourceLocation(CurPtr), " ");
      return CurPtr;
    }

    Result.setFlag(Token::HasUDSuffix);
    do {
      CurPtr = ConsumeChar(CurPtr, Size, Result);
      C = getCharAndSize(CurPtr, Size);
    } while (isIdentifierBody(C));
  }
  return CurPtr;
}
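
// For example, in C++11 the literals 123_km and "abc"_print carry the
// ud-suffixes _km and _print; in C++1y the lookahead above additionally
// accepts the short standard suffixes on string literals, so "abc"s keeps
// the 's' as part of the token even though it does not start with an
// underscore.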

/// LexStringLiteral - Lex the remainder of a string literal, after having lexed
/// either " or L" or u8" or u" or U".
bool Lexer::LexStringLiteral(Token &Result, const char *CurPtr,
                             tok::TokenKind Kind) {
  const char *NulCharacter = 0; // Does this string contain the \0 character?

  if (!isLexingRawMode() &&
      (Kind == tok::utf8_string_literal ||
       Kind == tok::utf16_string_literal ||
       Kind == tok::utf32_string_literal))
    Diag(BufferPtr, getLangOpts().CPlusPlus
           ? diag::warn_cxx98_compat_unicode_literal
           : diag::warn_c99_compat_unicode_literal);

  char C = getAndAdvanceChar(CurPtr, Result);
  while (C != '"') {
    // Skip escaped characters.  Escaped newlines will already be processed by
    // getAndAdvanceChar.
    if (C == '\\')
      C = getAndAdvanceChar(CurPtr, Result);

    if (C == '\n' || C == '\r' ||             // Newline.
        (C == 0 && CurPtr-1 == BufferEnd)) {  // End of file.
      if (!isLexingRawMode() && !LangOpts.AsmPreprocessor)
        Diag(BufferPtr, diag::ext_unterminated_string);
      FormTokenWithChars(Result, CurPtr-1, tok::unknown);
      return true;
    }

    if (C == 0) {
      if (isCodeCompletionPoint(CurPtr-1)) {
        PP->CodeCompleteNaturalLanguage();
        FormTokenWithChars(Result, CurPtr-1, tok::unknown);
        cutOffLexing();
        return true;
      }

      NulCharacter = CurPtr-1;
    }
    C = getAndAdvanceChar(CurPtr, Result);
  }

  // If we are in C++11, lex the optional ud-suffix.
  if (getLangOpts().CPlusPlus)
    CurPtr = LexUDSuffix(Result, CurPtr, true);

  // If a nul character existed in the string, warn about it.
  if (NulCharacter && !isLexingRawMode())
    Diag(NulCharacter, diag::null_in_string);

  // Update the location of the token as well as the BufferPtr instance var.
  const char *TokStart = BufferPtr;
  FormTokenWithChars(Result, CurPtr, Kind);
  Result.setLiteralData(TokStart);
  return true;
}

/// LexRawStringLiteral - Lex the remainder of a raw string literal, after
/// having lexed R", LR", u8R", uR", or UR".
bool Lexer::LexRawStringLiteral(Token &Result, const char *CurPtr,
                                tok::TokenKind Kind) {
  // This function doesn't use getAndAdvanceChar because C++0x [lex.pptoken]p3:
  //  Between the initial and final double quote characters of the raw string,
  //  any transformations performed in phases 1 and 2 (trigraphs,
  //  universal-character-names, and line splicing) are reverted.

  if (!isLexingRawMode())
    Diag(BufferPtr, diag::warn_cxx98_compat_raw_string_literal);

  unsigned PrefixLen = 0;

  while (PrefixLen != 16 && isRawStringDelimBody(CurPtr[PrefixLen]))
    ++PrefixLen;

  // If the last character was not a '(', then we didn't lex a valid delimiter.
  if (CurPtr[PrefixLen] != '(') {
    if (!isLexingRawMode()) {
      const char *PrefixEnd = &CurPtr[PrefixLen];
      if (PrefixLen == 16) {
        Diag(PrefixEnd, diag::err_raw_delim_too_long);
      } else {
        Diag(PrefixEnd, diag::err_invalid_char_raw_delim)
          << StringRef(PrefixEnd, 1);
      }
    }

    // Search for the next '"' in hopes of salvaging the lexer. Unfortunately,
    // it's possible the '"' was intended to be part of the raw string, but
    // there's not much we can do about that.
    while (1) {
      char C = *CurPtr++;

      if (C == '"')
        break;
      if (C == 0 && CurPtr-1 == BufferEnd) {
        --CurPtr;
        break;
      }
    }

    FormTokenWithChars(Result, CurPtr, tok::unknown);
    return true;
  }

  // Save prefix and move CurPtr past it
  const char *Prefix = CurPtr;
  CurPtr += PrefixLen + 1; // skip over prefix and '('

  while (1) {
    char C = *CurPtr++;

    if (C == ')') {
      // Check for prefix match and closing quote.
      if (strncmp(CurPtr, Prefix, PrefixLen) == 0 && CurPtr[PrefixLen] == '"') {
        CurPtr += PrefixLen + 1; // skip over prefix and '"'
        break;
      }
    } else if (C == 0 && CurPtr-1 == BufferEnd) { // End of file.
      if (!isLexingRawMode())
        Diag(BufferPtr, diag::err_unterminated_raw_string)
          << StringRef(Prefix, PrefixLen);
      FormTokenWithChars(Result, CurPtr-1, tok::unknown);
      return true;
    }
  }

  // If we are in C++11, lex the optional ud-suffix.
  if (getLangOpts().CPlusPlus)
    CurPtr = LexUDSuffix(Result, CurPtr, true);

  // Update the location of token as well as BufferPtr.
  const char *TokStart = BufferPtr;
  FormTokenWithChars(Result, CurPtr, Kind);
  Result.setLiteralData(TokStart);
  return true;
}
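
// As an example, the raw string R"xyz(a "quoted" \string)xyz" is lexed by
// scanning the delimiter "xyz" (at most 16 characters), then searching for
// the exact sequence )xyz" with no trigraph or line-splice processing
// applied to the contents in between.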

/// LexAngledStringLiteral - Lex the remainder of an angled string literal,
/// after having lexed the '<' character.  This is used for #include filenames.
bool Lexer::LexAngledStringLiteral(Token &Result, const char *CurPtr) {
  const char *NulCharacter = 0; // Does this string contain the \0 character?
  const char *AfterLessPos = CurPtr;
  char C = getAndAdvanceChar(CurPtr, Result);
  while (C != '>') {
    // Skip escaped characters.
    if (C == '\\') {
      // Skip the escaped character.
      getAndAdvanceChar(CurPtr, Result);
    } else if (C == '\n' || C == '\r' ||             // Newline.
               (C == 0 && (CurPtr-1 == BufferEnd ||  // End of file.
                           isCodeCompletionPoint(CurPtr-1)))) {
      // If the filename is unterminated, then it must just be a lone <
      // character.  Return this as such.
      FormTokenWithChars(Result, AfterLessPos, tok::less);
      return true;
    } else if (C == 0) {
      NulCharacter = CurPtr-1;
    }
    C = getAndAdvanceChar(CurPtr, Result);
  }

  // If a nul character existed in the string, warn about it.
  if (NulCharacter && !isLexingRawMode())
    Diag(NulCharacter, diag::null_in_string);

  // Update the location of token as well as BufferPtr.
  const char *TokStart = BufferPtr;
  FormTokenWithChars(Result, CurPtr, tok::angle_string_literal);
  Result.setLiteralData(TokStart);
  return true;
}
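
// For example, after the '<' in "#include <stdio.h>" this consumes up to the
// closing '>' and produces an angle_string_literal token; if the line ends
// before a '>' is found, the '<' is returned as an ordinary tok::less
// instead.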

/// LexCharConstant - Lex the remainder of a character constant, after having
/// lexed either ' or L' or u' or U'.
bool Lexer::LexCharConstant(Token &Result, const char *CurPtr,
                            tok::TokenKind Kind) {
  const char *NulCharacter = 0; // Does this character contain the \0 character?

  if (!isLexingRawMode() &&
      (Kind == tok::utf16_char_constant || Kind == tok::utf32_char_constant))
    Diag(BufferPtr, getLangOpts().CPlusPlus
           ? diag::warn_cxx98_compat_unicode_literal
           : diag::warn_c99_compat_unicode_literal);

  char C = getAndAdvanceChar(CurPtr, Result);
  if (C == '\'') {
    if (!isLexingRawMode() && !LangOpts.AsmPreprocessor)
      Diag(BufferPtr, diag::ext_empty_character);
    FormTokenWithChars(Result, CurPtr, tok::unknown);
    return true;
  }

  while (C != '\'') {
    // Skip escaped characters.
    if (C == '\\')
      C = getAndAdvanceChar(CurPtr, Result);

    if (C == '\n' || C == '\r' ||             // Newline.
        (C == 0 && CurPtr-1 == BufferEnd)) {  // End of file.
      if (!isLexingRawMode() && !LangOpts.AsmPreprocessor)
        Diag(BufferPtr, diag::ext_unterminated_char);
      FormTokenWithChars(Result, CurPtr-1, tok::unknown);
      return true;
    }

    if (C == 0) {
      if (isCodeCompletionPoint(CurPtr-1)) {
        PP->CodeCompleteNaturalLanguage();
        FormTokenWithChars(Result, CurPtr-1, tok::unknown);
        cutOffLexing();
        return true;
      }

      NulCharacter = CurPtr-1;
    }
    C = getAndAdvanceChar(CurPtr, Result);
  }

  // If we are in C++11, lex the optional ud-suffix.
  if (getLangOpts().CPlusPlus)
    CurPtr = LexUDSuffix(Result, CurPtr, false);

  // If a nul character existed in the character, warn about it.
  if (NulCharacter && !isLexingRawMode())
    Diag(NulCharacter, diag::null_in_char);

  // Update the location of token as well as BufferPtr.
  const char *TokStart = BufferPtr;
  FormTokenWithChars(Result, CurPtr, Kind);
  Result.setLiteralData(TokStart);
  return true;
}

/// SkipWhitespace - Efficiently skip over a series of whitespace characters.
/// Update BufferPtr to point to the next non-whitespace character and return.
///
/// This method forms a token and returns true if KeepWhitespaceMode is enabled.
///
bool Lexer::SkipWhitespace(Token &Result, const char *CurPtr,
                           bool &TokAtPhysicalStartOfLine) {
  // Whitespace - Skip it, then return the token after the whitespace.
  bool SawNewline = isVerticalWhitespace(CurPtr[-1]);

  unsigned char Char = *CurPtr;

  // Skip consecutive spaces efficiently.
  while (1) {
    // Skip horizontal whitespace very aggressively.
    while (isHorizontalWhitespace(Char))
      Char = *++CurPtr;

    // Otherwise if we have something other than whitespace, we're done.
    if (!isVerticalWhitespace(Char))
      break;

    if (ParsingPreprocessorDirective) {
      // End of preprocessor directive line, let LexTokenInternal handle this.
      BufferPtr = CurPtr;
      return false;
    }

    // OK, but handle newline.
    SawNewline = true;
    Char = *++CurPtr;
  }

  // If the client wants us to return whitespace, return it now.
  if (isKeepWhitespaceMode()) {
    FormTokenWithChars(Result, CurPtr, tok::unknown);
    if (SawNewline) {
      IsAtStartOfLine = true;
      IsAtPhysicalStartOfLine = true;
    }
    // FIXME: The next token will not have LeadingSpace set.
    return true;
  }

  // If this isn't immediately after a newline, there is leading space.
  char PrevChar = CurPtr[-1];
  bool HasLeadingSpace = !isVerticalWhitespace(PrevChar);

  Result.setFlagValue(Token::LeadingSpace, HasLeadingSpace);
  if (SawNewline) {
    Result.setFlag(Token::StartOfLine);
    TokAtPhysicalStartOfLine = true;
  }

  BufferPtr = CurPtr;
  return false;
}

/// We have just read the // characters from input.  Skip until we find the
/// newline character that terminates the comment.  Then update BufferPtr and
/// return.
///
/// If we're in KeepCommentMode or any CommentHandler has inserted
/// some tokens, this will store the first token and return true.
bool Lexer::SkipLineComment(Token &Result, const char *CurPtr,
                            bool &TokAtPhysicalStartOfLine) {
  // If Line comments aren't explicitly enabled for this language, emit an
  // extension warning.
  if (!LangOpts.LineComment && !isLexingRawMode()) {
    Diag(BufferPtr, diag::ext_line_comment);

    // Mark them enabled so we only emit one warning for this translation
    // unit.
    LangOpts.LineComment = true;
  }

  // Scan over the body of the comment.  The common case, when scanning, is that
  // the comment contains normal ascii characters with nothing interesting in
  // them.  As such, optimize for this case with the inner loop.
  char C;
  do {
    C = *CurPtr;
    // Skip over characters in the fast loop.
    while (C != 0 &&                // Potentially EOF.
           C != '\n' && C != '\r')  // Newline or DOS-style newline.
      C = *++CurPtr;

    const char *NextLine = CurPtr;
    if (C != 0) {
      // We found a newline, see if it's escaped.
      const char *EscapePtr = CurPtr-1;
      while (isHorizontalWhitespace(*EscapePtr)) // Skip whitespace.
        --EscapePtr;

      if (*EscapePtr == '\\') // Escaped newline.
        CurPtr = EscapePtr;
      else if (EscapePtr[0] == '/' && EscapePtr[-1] == '?' &&
               EscapePtr[-2] == '?') // Trigraph-escaped newline.
        CurPtr = EscapePtr-2;
      else
        break; // This is a newline, we're done.
    }

    // Otherwise, this is a hard case.  Fall back on getAndAdvanceChar to
    // properly decode the character.  Read it in raw mode to avoid emitting
    // diagnostics about things like trigraphs.  If we see an escaped newline,
    // we'll handle it below.
    const char *OldPtr = CurPtr;
    bool OldRawMode = isLexingRawMode();
    LexingRawMode = true;
    C = getAndAdvanceChar(CurPtr, Result);
    LexingRawMode = OldRawMode;

    // If we read only one character, then no special handling is needed.
    // We're done and can skip forward to the newline.
    if (C != 0 && CurPtr == OldPtr+1) {
      CurPtr = NextLine;
      break;
    }

    // If we read multiple characters, and one of those characters was a \r or
    // \n, then we had an escaped newline within the comment.  Emit diagnostic
    // unless the next line is also a // comment.
    if (CurPtr != OldPtr+1 && C != '/' && CurPtr[0] != '/') {
      for (; OldPtr != CurPtr; ++OldPtr)
        if (OldPtr[0] == '\n' || OldPtr[0] == '\r') {
          // Okay, we found a // comment that ends in a newline, if the next
          // line is also a // comment, but has spaces, don't emit a diagnostic.
          if (isWhitespace(C)) {
            const char *ForwardPtr = CurPtr;
            while (isWhitespace(*ForwardPtr))  // Skip whitespace.
              ++ForwardPtr;
            if (ForwardPtr[0] == '/' && ForwardPtr[1] == '/')
              break;
          }

          if (!isLexingRawMode())
            Diag(OldPtr-1, diag::ext_multi_line_line_comment);
          break;
        }
    }

    if (CurPtr == BufferEnd+1) {
      --CurPtr;
      break;
    }

    if (C == '\0' && isCodeCompletionPoint(CurPtr-1)) {
      PP->CodeCompleteNaturalLanguage();
      cutOffLexing();
      return false;
    }

  } while (C != '\n' && C != '\r');

  // Found but did not consume the newline.  Notify comment handlers about the
  // comment unless we're in a #if 0 block.
  if (PP && !isLexingRawMode() &&
      PP->HandleComment(Result, SourceRange(getSourceLocation(BufferPtr),
                                            getSourceLocation(CurPtr)))) {
    BufferPtr = CurPtr;
    return true; // A token has to be returned.
  }

  // If we are returning comments as tokens, return this comment as a token.
  if (inKeepCommentMode())
    return SaveLineComment(Result, CurPtr);

  // If we are inside a preprocessor directive and we see the end of line,
  // return immediately, so that the lexer can return this as an EOD token.
  if (ParsingPreprocessorDirective || CurPtr == BufferEnd) {
    BufferPtr = CurPtr;
    return false;
  }

  // Otherwise, eat the \n character.  We don't care if this is a \n\r or
  // \r\n sequence.  This is an efficiency hack (because we know the \n can't
  // contribute to another token), it isn't needed for correctness.  Note that
  // this is ok even in KeepWhitespaceMode, because we would have returned the
  // comment above in that mode.
  ++CurPtr;

  // The next returned token is at the start of the line.
  Result.setFlag(Token::StartOfLine);
  TokAtPhysicalStartOfLine = true;
  // No leading whitespace seen so far.
  Result.clearFlag(Token::LeadingSpace);
  BufferPtr = CurPtr;
  return false;
}

/// If in save-comment mode, package up this Line comment in an appropriate
/// way and return it.
bool Lexer::SaveLineComment(Token &Result, const char *CurPtr) {
  // If we're not in a preprocessor directive, just return the // comment
  // directly.
  FormTokenWithChars(Result, CurPtr, tok::comment);

  if (!ParsingPreprocessorDirective || LexingRawMode)
    return true;

  // If this Line-style comment is in a macro definition, transmogrify it into
  // a C-style block comment.
  bool Invalid = false;
  std::string Spelling = PP->getSpelling(Result, &Invalid);
  if (Invalid)
    return true;

  assert(Spelling[0] == '/' && Spelling[1] == '/' && "Not line comment?");
  Spelling[1] = '*';   // Change prefix to "/*".
  Spelling += "*/";    // add suffix.
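  // e.g., the spelling "// foo" becomes "/* foo*/", a form that survives
  // being glued onto the single logical line of a macro definition.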

  Result.setKind(tok::comment);
  PP->CreateString(Spelling, Result,
                   Result.getLocation(), Result.getLocation());
  return true;
}

/// isEndOfBlockCommentWithEscapedNewLine - Return true if the specified
/// newline character (either \\n or \\r) is part of an escaped newline
/// sequence. Issue a diagnostic if so. We know that the newline is inside of a
/// block comment.
static bool isEndOfBlockCommentWithEscapedNewLine(const char *CurPtr,
                                                  Lexer *L) {
  assert(CurPtr[0] == '\n' || CurPtr[0] == '\r');

  // Back up off the newline.
  --CurPtr;

  // If this is a two-character newline sequence, skip the other character.
  if (CurPtr[0] == '\n' || CurPtr[0] == '\r') {
    // \n\n or \r\r -> not escaped newline.
    if (CurPtr[0] == CurPtr[1])
      return false;
    // \n\r or \r\n -> skip the newline.
    --CurPtr;
  }

  // If we have horizontal whitespace, skip over it. We allow whitespace
  // between the slash and newline.
  bool HasSpace = false;
  while (isHorizontalWhitespace(*CurPtr) || *CurPtr == 0) {
    --CurPtr;
    HasSpace = true;
  }

  // If we have a slash, we know this is an escaped newline.
  if (*CurPtr == '\\') {
    if (CurPtr[-1] != '*') return false;
  } else {
    // It isn't a slash, is it the ?? / trigraph?
    if (CurPtr[0] != '/' || CurPtr[-1] != '?' || CurPtr[-2] != '?' ||
        CurPtr[-3] != '*')
      return false;
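
    // The matched sequence is '*' followed by the trigraph "?" "?" "/",
    // which is the trigraph spelling of a backslash; together with the
    // newline after it, it forms an escaped newline just like the plain
    // backslash handled above.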

    // This is the trigraph ending the comment. Emit a stern warning!
    CurPtr -= 2;

    // If no trigraphs are enabled, warn that we ignored this trigraph and
    // ignore this * character.
    if (!L->getLangOpts().Trigraphs) {
      if (!L->isLexingRawMode())
        L->Diag(CurPtr, diag::trigraph_ignored_block_comment);
      return false;
    }
    if (!L->isLexingRawMode())
      L->Diag(CurPtr, diag::trigraph_ends_block_comment);
  }

  // Warn about having an escaped newline between the */ characters.
  if (!L->isLexingRawMode())
    L->Diag(CurPtr, diag::escaped_newline_block_comment_end);

  // If there was space between the backslash and newline, warn about it.
  if (HasSpace && !L->isLexingRawMode())
    L->Diag(CurPtr, diag::backslash_newline_space);

  return true;
}

#ifdef __SSE2__
#include <emmintrin.h>
#elif __ALTIVEC__
#include <altivec.h>
#undef bool
#endif

/// We have just read from input the / and * characters that started a comment.
/// Read until we find the * and / characters that terminate the comment.
/// Note that we don't bother decoding trigraphs or escaped newlines in block
/// comments, because they cannot cause the comment to end. The only thing
/// that can happen is the comment could end with an escaped newline between
/// the terminating * and /.
///
/// If we're in KeepCommentMode or any CommentHandler has inserted
/// some tokens, this will store the first token and return true.
bool Lexer::SkipBlockComment(Token &Result, const char *CurPtr,
                             bool &TokAtPhysicalStartOfLine) {
  // Scan one character past where we should, looking for a '/' character. Once
  // we find it, check to see if it was preceded by a *. This common
  // optimization helps people who like to put a lot of * characters in their
  // comments.

  // The first character we get with newlines and trigraphs skipped to handle
  // the degenerate /*/ case below correctly if the * has an escaped newline
  // after it.
  unsigned CharSize;
  unsigned char C = getCharAndSize(CurPtr, CharSize);
  CurPtr += CharSize;
  if (C == 0 && CurPtr == BufferEnd+1) {
    if (!isLexingRawMode())
      Diag(BufferPtr, diag::err_unterminated_block_comment);
    --CurPtr;

    // KeepWhitespaceMode should return this broken comment as a token. Since
    // it isn't a well formed comment, just return it as an 'unknown' token.
    if (isKeepWhitespaceMode()) {
      FormTokenWithChars(Result, CurPtr, tok::unknown);
      return true;
    }

    BufferPtr = CurPtr;
    return false;
  }

  // Check to see if the first character after the '/*' is another /. If so,
  // then this slash does not end the block comment, it is part of it.
  if (C == '/')
    C = *CurPtr++;

  while (1) {
    // Skip over all non-interesting characters until we find end of buffer or a
    // (probably ending) '/' character.
    if (CurPtr + 24 < BufferEnd &&
        // If there is a code-completion point avoid the fast scan because it
        // doesn't check for '\0'.
        !(PP && PP->getCodeCompletionFileLoc() == FileLoc)) {
      // While not aligned to a 16-byte boundary.
      while (C != '/' && ((intptr_t)CurPtr & 0x0F) != 0)
        C = *CurPtr++;

      if (C == '/') goto FoundSlash;

#ifdef __SSE2__
      __m128i Slashes = _mm_set1_epi8('/');
      while (CurPtr+16 <= BufferEnd) {
        int cmp = _mm_movemask_epi8(_mm_cmpeq_epi8(*(const __m128i*)CurPtr,
                                    Slashes));
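        // cmp has bit i set iff byte i of this 16-byte chunk is '/', so the
        // count of trailing zeros below is the offset of the first slash.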
        if (cmp != 0) {
          // Adjust the pointer to point directly after the first slash. It's
          // not necessary to set C here, it will be overwritten at the end of
          // the outer loop.
          CurPtr += llvm::countTrailingZeros<unsigned>(cmp) + 1;
          goto FoundSlash;
        }
        CurPtr += 16;
      }
#elif __ALTIVEC__
      __vector unsigned char Slashes = {
        '/', '/', '/', '/',  '/', '/', '/', '/',
        '/', '/', '/', '/',  '/', '/', '/', '/'
      };
      while (CurPtr+16 <= BufferEnd &&
             !vec_any_eq(*(vector unsigned char*)CurPtr, Slashes))
        CurPtr += 16;
#else
      // Scan for '/' quickly. Many block comments are very large.
      while (CurPtr[0] != '/' &&
             CurPtr[1] != '/' &&
             CurPtr[2] != '/' &&
             CurPtr[3] != '/' &&
             CurPtr+4 < BufferEnd) {
        CurPtr += 4;
      }
#endif

      // It has to be one of the bytes scanned, increment to it and read one.
      C = *CurPtr++;
    }

    // Loop to scan the remainder.
    while (C != '/' && C != '\0')
      C = *CurPtr++;

    if (C == '/') {
  FoundSlash:
      if (CurPtr[-2] == '*') // We found the final */. We're done!
        break;

      if ((CurPtr[-2] == '\n' || CurPtr[-2] == '\r')) {
        if (isEndOfBlockCommentWithEscapedNewLine(CurPtr-2, this)) {
          // We found the final */, though it had an escaped newline between the
          // * and /. We're done!
          break;
        }
      }
      if (CurPtr[0] == '*' && CurPtr[1] != '/') {
        // If this is a /* inside of the comment, emit a warning. Don't do this
        // if this is a /*/, which will end the comment. This misses cases with
        // embedded escaped newlines, but oh well.
        if (!isLexingRawMode())
          Diag(CurPtr-1, diag::warn_nested_block_comment);
      }
    } else if (C == 0 && CurPtr == BufferEnd+1) {
      if (!isLexingRawMode())
        Diag(BufferPtr, diag::err_unterminated_block_comment);
      // Note: the user probably forgot a */. We could continue immediately
      // after the /*, but this would involve lexing a lot of what really is the
      // comment, which surely would confuse the parser.
      --CurPtr;

      // KeepWhitespaceMode should return this broken comment as a token. Since
      // it isn't a well formed comment, just return it as an 'unknown' token.
      if (isKeepWhitespaceMode()) {
        FormTokenWithChars(Result, CurPtr, tok::unknown);
        return true;
      }

      BufferPtr = CurPtr;
      return false;
    } else if (C == '\0' && isCodeCompletionPoint(CurPtr-1)) {
      PP->CodeCompleteNaturalLanguage();
      cutOffLexing();
      return false;
    }

    C = *CurPtr++;
  }

  // Notify comment handlers about the comment unless we're in a #if 0 block.
  if (PP && !isLexingRawMode() &&
      PP->HandleComment(Result, SourceRange(getSourceLocation(BufferPtr),
                                            getSourceLocation(CurPtr)))) {
    BufferPtr = CurPtr;
    return true; // A token has to be returned.
  }

  // If we are returning comments as tokens, return this comment as a token.
  if (inKeepCommentMode()) {
    FormTokenWithChars(Result, CurPtr, tok::comment);
    return true;
  }

  // It is common for the tokens immediately after a /**/ comment to be
  // whitespace. Instead of going through the big switch, handle it
  // efficiently now. This is safe even in KeepWhitespaceMode because we would
  // have already returned above with the comment as a token.
  if (isHorizontalWhitespace(*CurPtr)) {
    SkipWhitespace(Result, CurPtr+1, TokAtPhysicalStartOfLine);
    return false;
  }

  // Otherwise, just return so that the next character will be lexed as a token.
  BufferPtr = CurPtr;
  Result.setFlag(Token::LeadingSpace);
  return false;
}

//===----------------------------------------------------------------------===//
// Primary Lexing Entry Points
//===----------------------------------------------------------------------===//

/// ReadToEndOfLine - Read the rest of the current preprocessor line as an
/// uninterpreted string. This switches the lexer out of directive mode.
void Lexer::ReadToEndOfLine(SmallVectorImpl<char> *Result) {
  assert(ParsingPreprocessorDirective && ParsingFilename == false &&
         "Must be in a preprocessing directive!");
  Token Tmp;

  // CurPtr - Cache BufferPtr in an automatic variable.
  const char *CurPtr = BufferPtr;
  while (1) {
    char Char = getAndAdvanceChar(CurPtr, Tmp);
    switch (Char) {
    default:
      if (Result)
        Result->push_back(Char);
      break;
    case 0: // Null.
      // Found end of file?
      if (CurPtr-1 != BufferEnd) {
        if (isCodeCompletionPoint(CurPtr-1)) {
          PP->CodeCompleteNaturalLanguage();
          cutOffLexing();
          return;
        }

        // Nope, normal character, continue.
        if (Result)
          Result->push_back(Char);
        break;
      }
      // FALL THROUGH.
    case '\r':
    case '\n':
      // Okay, we found the end of the line. First, back up past the \0, \r, \n.
      assert(CurPtr[-1] == Char && "Trigraphs for newline?");
      BufferPtr = CurPtr-1;

      // Next, lex the character, which should handle the EOD transition.
      Lex(Tmp);
      if (Tmp.is(tok::code_completion)) {
        if (PP)
          PP->CodeCompleteNaturalLanguage();
        Lex(Tmp);
      }
      assert(Tmp.is(tok::eod) && "Unexpected token!");

      // Finally, we're done.
      return;
    }
  }
}

/// LexEndOfFile - CurPtr points to the end of this file. Handle this
/// condition, reporting diagnostics and handling other edge cases as required.
/// This returns true if Result contains a token, false if PP.Lex should be
/// called again.
bool Lexer::LexEndOfFile(Token &Result, const char *CurPtr) {
  // If we hit the end of the file while parsing a preprocessor directive,
  // end the preprocessor directive first. The next token returned will
  // then be the end of file.
  if (ParsingPreprocessorDirective) {
    // Done parsing the "line".
    ParsingPreprocessorDirective = false;
    // Update the location of token as well as BufferPtr.
    FormTokenWithChars(Result, CurPtr, tok::eod);

    // Restore comment saving mode, in case it was disabled for directive.
    resetExtendedTokenMode();
    return true; // Have a token.
  }

  // If we are in raw mode, return this event as an EOF token. Let the caller
  // that put us in raw mode handle the event.
  if (isLexingRawMode()) {
    Result.startToken();
    BufferPtr = BufferEnd;
    FormTokenWithChars(Result, BufferEnd, tok::eof);
    return true;
  }

  // Issue diagnostics for unterminated #if and missing newline.

  // If we are in a #if directive, emit an error.
  while (!ConditionalStack.empty()) {
    if (PP->getCodeCompletionFileLoc() != FileLoc)
      PP->Diag(ConditionalStack.back().IfLoc,
               diag::err_pp_unterminated_conditional);
    ConditionalStack.pop_back();
  }

  // C99 5.1.1.2p2: If the file is non-empty and didn't end in a newline, issue
  // a pedwarn.
  if (CurPtr != BufferStart && (CurPtr[-1] != '\n' && CurPtr[-1] != '\r')) {
    DiagnosticsEngine &Diags = PP->getDiagnostics();
    SourceLocation EndLoc = getSourceLocation(BufferEnd);
    unsigned DiagID;

    if (LangOpts.CPlusPlus11) {
      // C++11 [lex.phases] 2.2 p2
      // Prefer the C++98 pedantic compatibility warning over the generic,
      // non-extension, user-requested "missing newline at EOF" warning.
      if (Diags.getDiagnosticLevel(diag::warn_cxx98_compat_no_newline_eof,
                                   EndLoc) != DiagnosticsEngine::Ignored) {
        DiagID = diag::warn_cxx98_compat_no_newline_eof;
      } else {
        DiagID = diag::warn_no_newline_eof;
      }
    } else {
      DiagID = diag::ext_no_newline_eof;
    }

    Diag(BufferEnd, DiagID)
      << FixItHint::CreateInsertion(EndLoc, "\n");
  }

  BufferPtr = CurPtr;

  // Finally, let the preprocessor handle this.
  return PP->HandleEndOfFile(Result, isPragmaLexer());
}

/// isNextPPTokenLParen - Return 1 if the next unexpanded token lexed from
/// the specified lexer will return a tok::l_paren token, 0 if it is something
/// else and 2 if there are no more tokens in the buffer controlled by the
/// lexer.
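///
/// The preprocessor uses this when it sees a function-like macro name, to
/// peek whether a '(' follows (i.e. whether the macro is actually being
/// invoked) without permanently consuming any tokens.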
unsigned Lexer::isNextPPTokenLParen() {
  assert(!LexingRawMode && "How can we expand a macro from a skipping buffer?");

  // Switch to 'skipping' mode. This will ensure that we can lex a token
  // without emitting diagnostics, disables macro expansion, and will cause EOF
  // to return an EOF token instead of popping the include stack.
  LexingRawMode = true;

  // Save state that can be changed while lexing so that we can restore it.
  const char *TmpBufferPtr = BufferPtr;
  bool inPPDirectiveMode = ParsingPreprocessorDirective;
  bool atStartOfLine = IsAtStartOfLine;
  bool atPhysicalStartOfLine = IsAtPhysicalStartOfLine;
  bool leadingSpace = HasLeadingSpace;

  Token Tok;
  Lex(Tok);

  // Restore state that may have changed.
  BufferPtr = TmpBufferPtr;
  ParsingPreprocessorDirective = inPPDirectiveMode;
  HasLeadingSpace = leadingSpace;
  IsAtStartOfLine = atStartOfLine;
  IsAtPhysicalStartOfLine = atPhysicalStartOfLine;

  // Restore the lexer back to non-skipping mode.
  LexingRawMode = false;

  if (Tok.is(tok::eof))
    return 2;
  return Tok.is(tok::l_paren);
}

/// \brief Find the end of a version control conflict marker.
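///
/// For example, a normal (CMK_Normal) marker block looks like:
/// \code
///   <<<<<<< HEAD
///   ... ours ...
///   =======
///   ... theirs ...
///   >>>>>>> other-branch
/// \endcode
/// so the terminator to search for is ">>>>>>>" (or "<<<<\n" for
/// Perforce-style markers).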
static const char *FindConflictEnd(const char *CurPtr, const char *BufferEnd,
                                   ConflictMarkerKind CMK) {
  const char *Terminator = CMK == CMK_Perforce ? "<<<<\n" : ">>>>>>>";
  size_t TermLen = CMK == CMK_Perforce ? 5 : 7;
  StringRef RestOfBuffer(CurPtr+TermLen, BufferEnd-CurPtr-TermLen);
  size_t Pos = RestOfBuffer.find(Terminator);
  while (Pos != StringRef::npos) {
    // Must occur at start of line.
    if (RestOfBuffer[Pos-1] != '\r' &&
        RestOfBuffer[Pos-1] != '\n') {
      RestOfBuffer = RestOfBuffer.substr(Pos+TermLen);
      Pos = RestOfBuffer.find(Terminator);
      continue;
    }
    return RestOfBuffer.data()+Pos;
  }
  return 0;
}

/// IsStartOfConflictMarker - If the specified pointer is the start of a version
/// control conflict marker like '<<<<<<<', recognize it as such, emit an error
/// and recover nicely. This returns true if it is a conflict marker and false
/// if not.
bool Lexer::IsStartOfConflictMarker(const char *CurPtr) {
  // Only a conflict marker if it starts at the beginning of a line.
  if (CurPtr != BufferStart &&
      CurPtr[-1] != '\n' && CurPtr[-1] != '\r')
    return false;

  // Check to see if we have <<<<<<< or >>>>.
  if ((BufferEnd-CurPtr < 8 || StringRef(CurPtr, 7) != "<<<<<<<") &&
      (BufferEnd-CurPtr < 6 || StringRef(CurPtr, 5) != ">>>> "))
    return false;

  // If we have a situation where we don't care about conflict markers, ignore
  // it.
  if (CurrentConflictMarkerState || isLexingRawMode())
    return false;

  ConflictMarkerKind Kind = *CurPtr == '<' ? CMK_Normal : CMK_Perforce;

  // Check to see if there is an ending marker somewhere in the buffer at the
  // start of a line to terminate this conflict marker.
  if (FindConflictEnd(CurPtr, BufferEnd, Kind)) {
    // We found a match. We are really in a conflict marker.
    // Diagnose this, and ignore to the end of line.
    Diag(CurPtr, diag::err_conflict_marker);
    CurrentConflictMarkerState = Kind;

    // Skip ahead to the end of line. We know this exists because the
    // end-of-conflict marker starts with \r or \n.
    while (*CurPtr != '\r' && *CurPtr != '\n') {
      assert(CurPtr != BufferEnd && "Didn't find end of line");
      ++CurPtr;
    }
    BufferPtr = CurPtr;
    return true;
  }

  // No end of conflict marker found.
  return false;
}

/// HandleEndOfConflictMarker - If this is a '====' or '||||' or '>>>>', or if
/// it is '<<<<' and the conflict marker started with a '>>>>' marker, then it
/// is the end of a conflict marker. Handle it by ignoring up until the end of
/// the line. This returns true if it is a conflict marker and false if not.
bool Lexer::HandleEndOfConflictMarker(const char *CurPtr) {
  // Only a conflict marker if it starts at the beginning of a line.
  if (CurPtr != BufferStart &&
      CurPtr[-1] != '\n' && CurPtr[-1] != '\r')
    return false;

  // If we have a situation where we don't care about conflict markers, ignore
  // it.
  if (!CurrentConflictMarkerState || isLexingRawMode())
    return false;

  // Check to see if we have the marker (4 characters in a row).
  for (unsigned i = 1; i != 4; ++i)
    if (CurPtr[i] != CurPtr[0])
      return false;

  // If we do have it, search for the end of the conflict marker. This could
  // fail if it got skipped with a '#if 0' or something. Note that CurPtr might
  // be the end of conflict marker.
  if (const char *End = FindConflictEnd(CurPtr, BufferEnd,
                                        CurrentConflictMarkerState)) {
    CurPtr = End;

    // Skip ahead to the end of line.
    while (CurPtr != BufferEnd && *CurPtr != '\r' && *CurPtr != '\n')
      ++CurPtr;

    BufferPtr = CurPtr;

    // No longer in the conflict marker.
    CurrentConflictMarkerState = CMK_None;
    return true;
  }

  return false;
}

bool Lexer::isCodeCompletionPoint(const char *CurPtr) const {
  if (PP && PP->isCodeCompletionEnabled()) {
    SourceLocation Loc = FileLoc.getLocWithOffset(CurPtr-BufferStart);
    return Loc == PP->getCodeCompletionLoc();
  }

  return false;
}
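
/// tryReadUCN - Try to read a universal character name (\uXXXX with four hex
/// digits, or \UXXXXXXXX with eight) after its introducing backslash; SlashLoc
/// is the location of that backslash. Returns the code point, or 0 if there is
/// no valid UCN here; on success StartPtr is advanced past the consumed text.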
uint32_t Lexer::tryReadUCN(const char *&StartPtr, const char *SlashLoc,
                           Token *Result) {
  unsigned CharSize;
  char Kind = getCharAndSize(StartPtr, CharSize);

  unsigned NumHexDigits;
  if (Kind == 'u')
    NumHexDigits = 4;
  else if (Kind == 'U')
    NumHexDigits = 8;
  else
    return 0;

  if (!LangOpts.CPlusPlus && !LangOpts.C99) {
    if (Result && !isLexingRawMode())
      Diag(SlashLoc, diag::warn_ucn_not_valid_in_c89);
    return 0;
  }

  const char *CurPtr = StartPtr + CharSize;
  const char *KindLoc = &CurPtr[-1];

  uint32_t CodePoint = 0;
  for (unsigned i = 0; i < NumHexDigits; ++i) {
    char C = getCharAndSize(CurPtr, CharSize);

    unsigned Value = llvm::hexDigitValue(C);
    if (Value == -1U) {
      if (Result && !isLexingRawMode()) {
        if (i == 0) {
          Diag(BufferPtr, diag::warn_ucn_escape_no_digits)
            << StringRef(KindLoc, 1);
        } else {
          Diag(BufferPtr, diag::warn_ucn_escape_incomplete);

          // If the user wrote \U1234, suggest a fixit to \u.
          if (i == 4 && NumHexDigits == 8) {
            CharSourceRange URange = makeCharRange(*this, KindLoc, KindLoc + 1);
            Diag(KindLoc, diag::note_ucn_four_not_eight)
              << FixItHint::CreateReplacement(URange, "u");
          }
        }
      }

      return 0;
    }

    CodePoint <<= 4;
    CodePoint += Value;
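    // (Each iteration folds in one more hex digit; e.g. "\u0041" accumulates
    // the code point 0x41, 'A'.)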

    CurPtr += CharSize;
  }

  if (Result) {
    Result->setFlag(Token::HasUCN);
    if (CurPtr - StartPtr == (ptrdiff_t)NumHexDigits + 2)
      StartPtr = CurPtr;
    else
      while (StartPtr != CurPtr)
        (void)getAndAdvanceChar(StartPtr, *Result);
  } else {
    StartPtr = CurPtr;
  }

  // Don't apply C family restrictions to UCNs in assembly mode
  if (LangOpts.AsmPreprocessor)
    return CodePoint;

  // C99 6.4.3p2: A universal character name shall not specify a character whose
  // short identifier is less than 00A0 other than 0024 ($), 0040 (@), or
  // 0060 (`), nor one in the range D800 through DFFF inclusive.)
  // C++11 [lex.charset]p2: If the hexadecimal value for a
  // universal-character-name corresponds to a surrogate code point (in the
  // range 0xD800-0xDFFF, inclusive), the program is ill-formed. Additionally,
  // if the hexadecimal value for a universal-character-name outside the
  // c-char-sequence, s-char-sequence, or r-char-sequence of a character or
  // string literal corresponds to a control character (in either of the
  // ranges 0x00-0x1F or 0x7F-0x9F, both inclusive) or to a character in the
  // basic source character set, the program is ill-formed.
  if (CodePoint < 0xA0) {
    if (CodePoint == 0x24 || CodePoint == 0x40 || CodePoint == 0x60)
      return CodePoint;

    // We don't use isLexingRawMode() here because we need to warn about bad
    // UCNs even when skipping preprocessing tokens in a #if block.
    if (Result && PP) {
      if (CodePoint < 0x20 || CodePoint >= 0x7F)
        Diag(BufferPtr, diag::err_ucn_control_character);
      else {
        char C = static_cast<char>(CodePoint);
        Diag(BufferPtr, diag::err_ucn_escape_basic_scs) << StringRef(&C, 1);
      }
    }

    return 0;

  } else if (CodePoint >= 0xD800 && CodePoint <= 0xDFFF) {
    // C++03 allows UCNs representing surrogate characters. C99 and C++11 don't.
    // We don't use isLexingRawMode() here because we need to diagnose bad
    // UCNs even when skipping preprocessing tokens in a #if block.
    if (Result && PP) {
      if (LangOpts.CPlusPlus && !LangOpts.CPlusPlus11)
        Diag(BufferPtr, diag::warn_ucn_escape_surrogate);
      else
        Diag(BufferPtr, diag::err_ucn_escape_invalid);
    }
    return 0;
  }

  return CodePoint;
}
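
/// CheckUnicodeWhitespace - If C is a non-ASCII whitespace code point (for
/// example U+00A0 NO-BREAK SPACE), diagnose it as an extension, mark the
/// token as having leading space, and return true so the caller can treat it
/// as whitespace.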
bool Lexer::CheckUnicodeWhitespace(Token &Result, uint32_t C,
                                   const char *CurPtr) {
  static const llvm::sys::UnicodeCharSet UnicodeWhitespaceChars(
      UnicodeWhitespaceCharRanges);
  if (!isLexingRawMode() && !PP->isPreprocessedOutput() &&
      UnicodeWhitespaceChars.contains(C)) {
    Diag(BufferPtr, diag::ext_unicode_whitespace)
      << makeCharRange(*this, BufferPtr, CurPtr);

    Result.setFlag(Token::LeadingSpace);
    return true;
  }
  return false;
}

bool Lexer::LexUnicode(Token &Result, uint32_t C, const char *CurPtr) {
  if (isAllowedIDChar(C, LangOpts) && isAllowedInitiallyIDChar(C, LangOpts)) {
    if (!isLexingRawMode() && !ParsingPreprocessorDirective &&
        !PP->isPreprocessedOutput()) {
      maybeDiagnoseIDCharCompat(PP->getDiagnostics(), C,
                                makeCharRange(*this, BufferPtr, CurPtr),
                                /*IsFirst=*/true);
    }

    MIOpt.ReadToken();
    return LexIdentifier(Result, CurPtr);
  }

  if (!isLexingRawMode() && !ParsingPreprocessorDirective &&
      !PP->isPreprocessedOutput() &&
      !isASCII(*BufferPtr) && !isAllowedIDChar(C, LangOpts)) {
    // Non-ASCII characters tend to creep into source code unintentionally.
    // Instead of letting the parser complain about the unknown token,
    // just drop the character.
    // Note that we can /only/ do this when the non-ASCII character is actually
    // spelled as Unicode, not written as a UCN. The standard requires that
    // we not throw away any possible preprocessor tokens, but there's a
    // loophole in the mapping of Unicode characters to basic character set
    // characters that allows us to map these particular characters to, say,
    // whitespace.
    Diag(BufferPtr, diag::err_non_ascii)
      << FixItHint::CreateRemoval(makeCharRange(*this, BufferPtr, CurPtr));

    BufferPtr = CurPtr;
    return false;
  }

  // Otherwise, we have an explicit UCN or a character that's unlikely to show
  // up by accident.
  MIOpt.ReadToken();
  FormTokenWithChars(Result, CurPtr, tok::unknown);
  return true;
}

void Lexer::PropagateLineStartLeadingSpaceInfo(Token &Result) {
  IsAtStartOfLine = Result.isAtStartOfLine();
  HasLeadingSpace = Result.hasLeadingSpace();
  HasLeadingEmptyMacro = Result.hasLeadingEmptyMacro();
  // Note that this doesn't affect IsAtPhysicalStartOfLine.
}

bool Lexer::Lex(Token &Result) {
  // Start a new token.
  Result.startToken();

  // Set up misc whitespace flags for LexTokenInternal.
  if (IsAtStartOfLine) {
    Result.setFlag(Token::StartOfLine);
    IsAtStartOfLine = false;
  }

  if (HasLeadingSpace) {
    Result.setFlag(Token::LeadingSpace);
    HasLeadingSpace = false;
  }

  if (HasLeadingEmptyMacro) {
    Result.setFlag(Token::LeadingEmptyMacro);
    HasLeadingEmptyMacro = false;
  }
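
  // These three flags are a one-shot handoff: state recorded when the
  // previous token was lexed is applied to this token and then cleared, so
  // it cannot leak onto a later one.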

  bool atPhysicalStartOfLine = IsAtPhysicalStartOfLine;
  IsAtPhysicalStartOfLine = false;
  bool isRawLex = isLexingRawMode();
  (void) isRawLex;
  bool returnedToken = LexTokenInternal(Result, atPhysicalStartOfLine);
  // (After the LexTokenInternal call, the lexer might be destroyed.)
  assert((returnedToken || !isRawLex) && "Raw lex must succeed");
  return returnedToken;
}

/// LexTokenInternal - This implements a simple C family lexer. It is an
/// extremely performance critical piece of code. This assumes that the buffer
/// has a null character at the end of the file. This returns a preprocessing
/// token, not a normal token, as such, it is an internal interface. It assumes
/// that the Flags of result have been cleared before calling this.
bool Lexer::LexTokenInternal(Token &Result, bool TokAtPhysicalStartOfLine) {
LexNextToken:
  // New token, can't need cleaning yet.
  Result.clearFlag(Token::NeedsCleaning);
  Result.setIdentifierInfo(0);

  // CurPtr - Cache BufferPtr in an automatic variable.
  const char *CurPtr = BufferPtr;

  // Small amounts of horizontal whitespace are very common between tokens.
  if ((*CurPtr == ' ') || (*CurPtr == '\t')) {
    ++CurPtr;
    while ((*CurPtr == ' ') || (*CurPtr == '\t'))
      ++CurPtr;

    // If we are keeping whitespace and other tokens, just return what we just
    // skipped. The next lexer invocation will return the token after the
    // whitespace.
    if (isKeepWhitespaceMode()) {
      FormTokenWithChars(Result, CurPtr, tok::unknown);
      // FIXME: The next token will not have LeadingSpace set.
      return true;
    }

    BufferPtr = CurPtr;
    Result.setFlag(Token::LeadingSpace);
  }

  unsigned SizeTmp, SizeTmp2; // Temporaries for use in cases below.

  // Read a character, advancing over it.
  char Char = getAndAdvanceChar(CurPtr, Result);
  tok::TokenKind Kind;

  switch (Char) {
  case 0: // Null.
    // Found end of file?
    if (CurPtr-1 == BufferEnd)
      return LexEndOfFile(Result, CurPtr-1);

    // Check if we are performing code completion.
    if (isCodeCompletionPoint(CurPtr-1)) {
      // Return the code-completion token.
      Result.startToken();
      FormTokenWithChars(Result, CurPtr, tok::code_completion);
      return true;
    }

    if (!isLexingRawMode())
      Diag(CurPtr-1, diag::null_in_file);
    Result.setFlag(Token::LeadingSpace);
    if (SkipWhitespace(Result, CurPtr, TokAtPhysicalStartOfLine))
      return true; // KeepWhitespaceMode

    // We know the lexer hasn't changed, so just try again with this lexer.
    // (We manually eliminate the tail call to avoid recursion.)
    goto LexNextToken;

  case 26: // DOS & CP/M EOF: "^Z".
    // If we're in Microsoft extensions mode, treat this as end of file.
    if (LangOpts.MicrosoftExt)
      return LexEndOfFile(Result, CurPtr-1);

    // If Microsoft extensions are disabled, this is just random garbage.
    Kind = tok::unknown;
    break;

  case '\n':
  case '\r':
    // If we are inside a preprocessor directive and we see the end of line,
    // we know we are done with the directive, so return an EOD token.
    if (ParsingPreprocessorDirective) {
      // Done parsing the "line".
      ParsingPreprocessorDirective = false;

      // Restore comment saving mode, in case it was disabled for directive.
      if (PP)
        resetExtendedTokenMode();

      // Since we consumed a newline, we are back at the start of a line.
      IsAtStartOfLine = true;
      IsAtPhysicalStartOfLine = true;

      Kind = tok::eod;
      break;
    }

    // No leading whitespace seen so far.
    Result.clearFlag(Token::LeadingSpace);

    if (SkipWhitespace(Result, CurPtr, TokAtPhysicalStartOfLine))
      return true; // KeepWhitespaceMode

    // We only saw whitespace, so just try again with this lexer.
    // (We manually eliminate the tail call to avoid recursion.)
    goto LexNextToken;
  case ' ':
  case '\t':
  case '\f':
  case '\v':
  SkipHorizontalWhitespace:
    Result.setFlag(Token::LeadingSpace);
    if (SkipWhitespace(Result, CurPtr, TokAtPhysicalStartOfLine))
      return true; // KeepWhitespaceMode

  SkipIgnoredUnits:
    CurPtr = BufferPtr;

    // If the next token is obviously a // or /* */ comment, skip it efficiently
    // too (without going through the big switch stmt).
    if (CurPtr[0] == '/' && CurPtr[1] == '/' && !inKeepCommentMode() &&
        LangOpts.LineComment &&
        (LangOpts.CPlusPlus || !LangOpts.TraditionalCPP)) {
      if (SkipLineComment(Result, CurPtr+2, TokAtPhysicalStartOfLine))
        return true; // There is a token to return.
      goto SkipIgnoredUnits;
    } else if (CurPtr[0] == '/' && CurPtr[1] == '*' && !inKeepCommentMode()) {
      if (SkipBlockComment(Result, CurPtr+2, TokAtPhysicalStartOfLine))
        return true; // There is a token to return.
      goto SkipIgnoredUnits;
    } else if (isHorizontalWhitespace(*CurPtr)) {
      goto SkipHorizontalWhitespace;
    }
    // We only saw whitespace, so just try again with this lexer.
    // (We manually eliminate the tail call to avoid recursion.)
    goto LexNextToken;

  // C99 6.4.4.1: Integer Constants.
  // C99 6.4.4.2: Floating Constants.
  case '0': case '1': case '2': case '3': case '4':
  case '5': case '6': case '7': case '8': case '9':
    // Notify MIOpt that we read a non-whitespace/non-comment token.
    MIOpt.ReadToken();
    return LexNumericConstant(Result, CurPtr);

  case 'u': // Identifier (uber) or C11/C++11 UTF-8 or UTF-16 string literal
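    // With C11/C++11 enabled this may instead start one of the literal forms
    // u"..." u'...' uR"(...)" u8"..." or u8R"(...)"; otherwise, and whenever
    // none of those match, 'u' is just the first character of an identifier.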
    // Notify MIOpt that we read a non-whitespace/non-comment token.
    MIOpt.ReadToken();

    if (LangOpts.CPlusPlus11 || LangOpts.C11) {
      Char = getCharAndSize(CurPtr, SizeTmp);

      // UTF-16 string literal
      if (Char == '"')
        return LexStringLiteral(Result, ConsumeChar(CurPtr, SizeTmp, Result),
                                tok::utf16_string_literal);

      // UTF-16 character constant
      if (Char == '\'')
        return LexCharConstant(Result, ConsumeChar(CurPtr, SizeTmp, Result),
                               tok::utf16_char_constant);

      // UTF-16 raw string literal
      if (Char == 'R' && LangOpts.CPlusPlus11 &&
          getCharAndSize(CurPtr + SizeTmp, SizeTmp2) == '"')
        return LexRawStringLiteral(Result,
                               ConsumeChar(ConsumeChar(CurPtr, SizeTmp, Result),
                                           SizeTmp2, Result),
                               tok::utf16_string_literal);

      if (Char == '8') {
        char Char2 = getCharAndSize(CurPtr + SizeTmp, SizeTmp2);

        // UTF-8 string literal
        if (Char2 == '"')
          return LexStringLiteral(Result,
                               ConsumeChar(ConsumeChar(CurPtr, SizeTmp, Result),
                                           SizeTmp2, Result),
                               tok::utf8_string_literal);

        if (Char2 == 'R' && LangOpts.CPlusPlus11) {
          unsigned SizeTmp3;
          char Char3 = getCharAndSize(CurPtr + SizeTmp + SizeTmp2, SizeTmp3);
          // UTF-8 raw string literal
          if (Char3 == '"') {
            return LexRawStringLiteral(Result,
                   ConsumeChar(ConsumeChar(ConsumeChar(CurPtr, SizeTmp, Result),
                                           SizeTmp2, Result),
                               SizeTmp3, Result),
                   tok::utf8_string_literal);
          }
        }
      }
    }

    // treat u like the start of an identifier.
    return LexIdentifier(Result, CurPtr);

  case 'U': // Identifier (Uber) or C11/C++11 UTF-32 string literal
    // Notify MIOpt that we read a non-whitespace/non-comment token.
    MIOpt.ReadToken();

    if (LangOpts.CPlusPlus11 || LangOpts.C11) {
      Char = getCharAndSize(CurPtr, SizeTmp);

      // UTF-32 string literal
      if (Char == '"')
        return LexStringLiteral(Result, ConsumeChar(CurPtr, SizeTmp, Result),
                                tok::utf32_string_literal);

      // UTF-32 character constant
      if (Char == '\'')
        return LexCharConstant(Result, ConsumeChar(CurPtr, SizeTmp, Result),
                               tok::utf32_char_constant);

      // UTF-32 raw string literal
      if (Char == 'R' && LangOpts.CPlusPlus11 &&
          getCharAndSize(CurPtr + SizeTmp, SizeTmp2) == '"')
        return LexRawStringLiteral(Result,
                               ConsumeChar(ConsumeChar(CurPtr, SizeTmp, Result),
                                           SizeTmp2, Result),
                               tok::utf32_string_literal);
    }

    // treat U like the start of an identifier.
    return LexIdentifier(Result, CurPtr);

  case 'R': // Identifier or C++0x raw string literal
    // Notify MIOpt that we read a non-whitespace/non-comment token.
    MIOpt.ReadToken();

    if (LangOpts.CPlusPlus11) {
      Char = getCharAndSize(CurPtr, SizeTmp);

      if (Char == '"')
        return LexRawStringLiteral(Result,
                                   ConsumeChar(CurPtr, SizeTmp, Result),
                                   tok::string_literal);
    }

    // treat R like the start of an identifier.
    return LexIdentifier(Result, CurPtr);

  case 'L': // Identifier (Loony) or wide literal (L'x' or L"xyz").
    // Notify MIOpt that we read a non-whitespace/non-comment token.
    MIOpt.ReadToken();
    Char = getCharAndSize(CurPtr, SizeTmp);

    // Wide string literal.
    if (Char == '"')
      return LexStringLiteral(Result, ConsumeChar(CurPtr, SizeTmp, Result),
                              tok::wide_string_literal);

    // Wide raw string literal.
    if (LangOpts.CPlusPlus11 && Char == 'R' &&
        getCharAndSize(CurPtr + SizeTmp, SizeTmp2) == '"')
      return LexRawStringLiteral(Result,
                               ConsumeChar(ConsumeChar(CurPtr, SizeTmp, Result),
                                           SizeTmp2, Result),
                               tok::wide_string_literal);

    // Wide character constant.
    if (Char == '\'')
      return LexCharConstant(Result, ConsumeChar(CurPtr, SizeTmp, Result),
                             tok::wide_char_constant);
    // FALL THROUGH, treating L like the start of an identifier.

  // C99 6.4.2: Identifiers.
  case 'A': case 'B': case 'C': case 'D': case 'E': case 'F': case 'G':
  case 'H': case 'I': case 'J': case 'K': /*'L'*/case 'M': case 'N':
  case 'O': case 'P': case 'Q': /*'R'*/case 'S': case 'T': /*'U'*/
  case 'V': case 'W': case 'X': case 'Y': case 'Z':
  case 'a': case 'b': case 'c': case 'd': case 'e': case 'f': case 'g':
  case 'h': case 'i': case 'j': case 'k': case 'l': case 'm': case 'n':
  case 'o': case 'p': case 'q': case 'r': case 's': case 't': /*'u'*/
  case 'v': case 'w': case 'x': case 'y': case 'z':
  case '_':
    // Notify MIOpt that we read a non-whitespace/non-comment token.
    MIOpt.ReadToken();
    return LexIdentifier(Result, CurPtr);
|
2008-01-04 01:58:54 +08:00
|
|
|
|
|
|
|
case '$': // $ in identifiers.
|
2012-03-11 15:00:24 +08:00
|
|
|
if (LangOpts.DollarIdents) {
|
2008-11-22 10:02:22 +08:00
|
|
|
if (!isLexingRawMode())
|
|
|
|
Diag(CurPtr-1, diag::ext_dollar_in_identifier);
|
2008-01-04 01:58:54 +08:00
|
|
|
// Notify MIOpt that we read a non-whitespace/non-comment token.
|
|
|
|
MIOpt.ReadToken();
|
|
|
|
return LexIdentifier(Result, CurPtr);
|
|
|
|
}
|
2009-09-09 23:08:12 +08:00
|
|
|
|
2008-10-12 12:51:35 +08:00
|
|
|
Kind = tok::unknown;
|
2008-01-04 01:58:54 +08:00
|
|
|
break;
|
2009-09-09 23:08:12 +08:00
|
|
|
|
2006-06-18 13:43:12 +08:00
|
|
|
// C99 6.4.4: Character Constants.
|
|
|
|
case '\'':
|
2006-07-04 15:11:10 +08:00
|
|
|
// Notify MIOpt that we read a non-whitespace/non-comment token.
|
|
|
|
MIOpt.ReadToken();
|
2011-07-27 13:40:30 +08:00
|
|
|
return LexCharConstant(Result, CurPtr, tok::char_constant);
|
2006-06-18 13:43:12 +08:00
|
|
|
|
|
|
|
// C99 6.4.5: String Literals.
|
|
|
|
case '"':
|
2006-07-04 15:11:10 +08:00
|
|
|
// Notify MIOpt that we read a non-whitespace/non-comment token.
|
|
|
|
MIOpt.ReadToken();
|
2011-07-27 13:40:30 +08:00
|
|
|
return LexStringLiteral(Result, CurPtr, tok::string_literal);
|
2006-06-18 13:43:12 +08:00
|
|
|
|
|
|
|
// C99 6.4.6: Punctuators.
|
|
|
|
case '?':
|
2008-10-12 12:51:35 +08:00
|
|
|
Kind = tok::question;
|
2006-06-18 13:43:12 +08:00
|
|
|
break;
|
|
|
|
case '[':
|
2008-10-12 12:51:35 +08:00
|
|
|
Kind = tok::l_square;
|
2006-06-18 13:43:12 +08:00
|
|
|
break;
|
|
|
|
case ']':
|
2008-10-12 12:51:35 +08:00
|
|
|
Kind = tok::r_square;
|
2006-06-18 13:43:12 +08:00
|
|
|
break;
|
|
|
|
case '(':
|
2008-10-12 12:51:35 +08:00
|
|
|
Kind = tok::l_paren;
|
2006-06-18 13:43:12 +08:00
|
|
|
break;
|
|
|
|
case ')':
|
2008-10-12 12:51:35 +08:00
|
|
|
Kind = tok::r_paren;
|
2006-06-18 13:43:12 +08:00
|
|
|
break;
|
|
|
|
case '{':
|
2008-10-12 12:51:35 +08:00
|
|
|
Kind = tok::l_brace;
|
2006-06-18 13:43:12 +08:00
|
|
|
break;
|
|
|
|
case '}':
|
2008-10-12 12:51:35 +08:00
|
|
|
Kind = tok::r_brace;
|
2006-06-18 13:43:12 +08:00
|
|
|
break;
|
|
|
|
case '.':
|
|
|
|
Char = getCharAndSize(CurPtr, SizeTmp);
|
|
|
|
if (Char >= '0' && Char <= '9') {
|
2006-07-04 15:11:10 +08:00
|
|
|
// Notify MIOpt that we read a non-whitespace/non-comment token.
|
|
|
|
MIOpt.ReadToken();
|
|
|
|
|
2006-06-18 13:43:12 +08:00
|
|
|
return LexNumericConstant(Result, ConsumeChar(CurPtr, SizeTmp, Result));
|
2012-03-11 15:00:24 +08:00
|
|
|
} else if (LangOpts.CPlusPlus && Char == '*') {
|
2008-10-12 12:51:35 +08:00
|
|
|
Kind = tok::periodstar;
|
2006-06-18 13:43:12 +08:00
|
|
|
CurPtr += SizeTmp;
|
|
|
|
} else if (Char == '.' &&
|
|
|
|
getCharAndSize(CurPtr+SizeTmp, SizeTmp2) == '.') {
|
2008-10-12 12:51:35 +08:00
|
|
|
Kind = tok::ellipsis;
|
2006-06-18 13:43:12 +08:00
|
|
|
CurPtr = ConsumeChar(ConsumeChar(CurPtr, SizeTmp, Result),
|
|
|
|
SizeTmp2, Result);
|
|
|
|
} else {
|
2008-10-12 12:51:35 +08:00
|
|
|
Kind = tok::period;
|
2006-06-18 13:43:12 +08:00
|
|
|
}
|
|
|
|
break;
|
|
|
|
  case '&':
    Char = getCharAndSize(CurPtr, SizeTmp);
    if (Char == '&') {
      Kind = tok::ampamp;
      CurPtr = ConsumeChar(CurPtr, SizeTmp, Result);
    } else if (Char == '=') {
      Kind = tok::ampequal;
      CurPtr = ConsumeChar(CurPtr, SizeTmp, Result);
    } else {
      Kind = tok::amp;
    }
    break;
  case '*':
    if (getCharAndSize(CurPtr, SizeTmp) == '=') {
      Kind = tok::starequal;
      CurPtr = ConsumeChar(CurPtr, SizeTmp, Result);
    } else {
      Kind = tok::star;
    }
    break;
  case '+':
    Char = getCharAndSize(CurPtr, SizeTmp);
    if (Char == '+') {
      CurPtr = ConsumeChar(CurPtr, SizeTmp, Result);
      Kind = tok::plusplus;
    } else if (Char == '=') {
      CurPtr = ConsumeChar(CurPtr, SizeTmp, Result);
      Kind = tok::plusequal;
    } else {
      Kind = tok::plus;
    }
    break;
  case '-':
    Char = getCharAndSize(CurPtr, SizeTmp);
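    // Maximal munch in action: "a-->b" lexes as "a" "--" ">" "b", and in
    // C++ "a->*b" lexes "->*" as a single tok::arrowstar token.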
    if (Char == '-') {      // --
      CurPtr = ConsumeChar(CurPtr, SizeTmp, Result);
      Kind = tok::minusminus;
    } else if (Char == '>' && LangOpts.CPlusPlus &&
               getCharAndSize(CurPtr+SizeTmp, SizeTmp2) == '*') {  // C++ ->*
      CurPtr = ConsumeChar(ConsumeChar(CurPtr, SizeTmp, Result),
                           SizeTmp2, Result);
      Kind = tok::arrowstar;
    } else if (Char == '>') {   // ->
      CurPtr = ConsumeChar(CurPtr, SizeTmp, Result);
      Kind = tok::arrow;
    } else if (Char == '=') {   // -=
      CurPtr = ConsumeChar(CurPtr, SizeTmp, Result);
      Kind = tok::minusequal;
    } else {
      Kind = tok::minus;
    }
    break;
  case '~':
    Kind = tok::tilde;
    break;
  case '!':
    if (getCharAndSize(CurPtr, SizeTmp) == '=') {
      Kind = tok::exclaimequal;
      CurPtr = ConsumeChar(CurPtr, SizeTmp, Result);
    } else {
      Kind = tok::exclaim;
    }
    break;
  case '/':
    // C99 6.4.9: Comments.
    Char = getCharAndSize(CurPtr, SizeTmp);
    if (Char == '/') {         // Line comment.
      // Even if line comments are disabled (e.g. in C89 mode), we generally
      // want to lex this as a comment.  There is one problem with this,
      // though: in one particular corner case it can change the behavior of
      // the resultant program.  For example, in "foo //**/ bar", C89 would
      // lex this as "foo / bar" and languages with line comments would lex
      // it as "foo".  Check to see if the character after the second slash
      // is a '*'.  If so, we will lex that as a "/" instead of the start of
      // a comment.  However, we never do this if we are just preprocessing.
      bool TreatAsComment = LangOpts.LineComment &&
                            (LangOpts.CPlusPlus || !LangOpts.TraditionalCPP);
      if (!TreatAsComment)
        if (!(PP && PP->isPreprocessedOutput()))
          TreatAsComment = getCharAndSize(CurPtr+SizeTmp, SizeTmp2) != '*';

      if (TreatAsComment) {
        if (SkipLineComment(Result, ConsumeChar(CurPtr, SizeTmp, Result),
                            TokAtPhysicalStartOfLine))
          return true; // There is a token to return.

        // It is common for the tokens immediately after a // comment to be
        // whitespace (indentation for the next line).  Instead of going
        // through the big switch, handle it efficiently now.
        goto SkipIgnoredUnits;
      }
    }

    if (Char == '*') {  // /**/ comment.
      if (SkipBlockComment(Result, ConsumeChar(CurPtr, SizeTmp, Result),
                           TokAtPhysicalStartOfLine))
        return true; // There is a token to return.

      // We only saw whitespace, so just try again with this lexer.
      // (We manually eliminate the tail call to avoid recursion.)
      goto LexNextToken;
    }

    if (Char == '=') {
      CurPtr = ConsumeChar(CurPtr, SizeTmp, Result);
      Kind = tok::slashequal;
    } else {
      Kind = tok::slash;
    }
    break;
  case '%':
    Char = getCharAndSize(CurPtr, SizeTmp);
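    // Example: with digraphs enabled, "%>" lexes as '}', and a line that
    // starts with "%:define MAX 10" is handled like "#define MAX 10".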
    if (Char == '=') {
      Kind = tok::percentequal;
      CurPtr = ConsumeChar(CurPtr, SizeTmp, Result);
    } else if (LangOpts.Digraphs && Char == '>') {
      Kind = tok::r_brace;    // '%>' -> '}'
      CurPtr = ConsumeChar(CurPtr, SizeTmp, Result);
    } else if (LangOpts.Digraphs && Char == ':') {
      CurPtr = ConsumeChar(CurPtr, SizeTmp, Result);
      Char = getCharAndSize(CurPtr, SizeTmp);
      if (Char == '%' && getCharAndSize(CurPtr+SizeTmp, SizeTmp2) == ':') {
        Kind = tok::hashhash;   // '%:%:' -> '##'
        CurPtr = ConsumeChar(ConsumeChar(CurPtr, SizeTmp, Result),
                             SizeTmp2, Result);
      } else if (Char == '@' && LangOpts.MicrosoftExt) { // %:@ -> #@ -> Charize
        CurPtr = ConsumeChar(CurPtr, SizeTmp, Result);
        if (!isLexingRawMode())
          Diag(BufferPtr, diag::ext_charize_microsoft);
        Kind = tok::hashat;
      } else {                                           // '%:' -> '#'
        // We parsed a # character.  If this occurs at the start of the line,
        // it's actually the start of a preprocessing directive.  Call back to
        // the preprocessor to handle it.
        // FIXME: -fpreprocessed mode??
        if (TokAtPhysicalStartOfLine && !LexingRawMode && !Is_PragmaLexer)
          goto HandleDirective;

        Kind = tok::hash;
      }
    } else {
      Kind = tok::percent;
    }
    break;
  case '<':
    Char = getCharAndSize(CurPtr, SizeTmp);
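    // Example: in '#include <stdio.h>', everything from '<' through '>' is
    // lexed as a single angled string literal (the header name).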
    if (ParsingFilename) {
      return LexAngledStringLiteral(Result, CurPtr);
    } else if (Char == '<') {
      char After = getCharAndSize(CurPtr+SizeTmp, SizeTmp2);
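      // Example: a version-control conflict marker such as
      //   <<<<<<< HEAD
      // at the start of a line is recognized below so the lexer can recover
      // instead of emitting a stream of '<' tokens.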
      if (After == '=') {
        Kind = tok::lesslessequal;
        CurPtr = ConsumeChar(ConsumeChar(CurPtr, SizeTmp, Result),
                             SizeTmp2, Result);
      } else if (After == '<' && IsStartOfConflictMarker(CurPtr-1)) {
        // If this is actually a '<<<<<<<' version control conflict marker,
        // recognize it as such and recover nicely.
        goto LexNextToken;
      } else if (After == '<' && HandleEndOfConflictMarker(CurPtr-1)) {
        // If this is '<<<<' and we're in a Perforce-style conflict marker,
        // ignore it.
        goto LexNextToken;
      } else if (LangOpts.CUDA && After == '<') {
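        // CUDA kernel-launch syntax, e.g. "kernel<<<grid, block>>>(args)",
        // requires '<<<' to be lexed as a single token.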
        Kind = tok::lesslessless;
        CurPtr = ConsumeChar(ConsumeChar(CurPtr, SizeTmp, Result),
                             SizeTmp2, Result);
      } else {
        CurPtr = ConsumeChar(CurPtr, SizeTmp, Result);
        Kind = tok::lessless;
      }
    } else if (Char == '=') {
      CurPtr = ConsumeChar(CurPtr, SizeTmp, Result);
      Kind = tok::lessequal;
    } else if (LangOpts.Digraphs && Char == ':') {     // '<:' -> '['
      if (LangOpts.CPlusPlus11 &&
          getCharAndSize(CurPtr + SizeTmp, SizeTmp2) == ':') {
        // C++0x [lex.pptoken]p3:
        //   Otherwise, if the next three characters are <:: and the
        //   subsequent character is neither : nor >, the < is treated as a
        //   preprocessor token by itself and not as the first character of
        //   the alternative token <:.
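        // Example: "std::vector<::std::string> v;" must lex '<' and then
        // '::', not the digraph '<:' (i.e. '['), for the template-id to
        // parse correctly.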
        unsigned SizeTmp3;
        char After = getCharAndSize(CurPtr + SizeTmp + SizeTmp2, SizeTmp3);
        if (After != ':' && After != '>') {
          Kind = tok::less;
          if (!isLexingRawMode())
            Diag(BufferPtr, diag::warn_cxx98_compat_less_colon_colon);
          break;
        }
      }

      CurPtr = ConsumeChar(CurPtr, SizeTmp, Result);
      Kind = tok::l_square;
    } else if (LangOpts.Digraphs && Char == '%') {     // '<%' -> '{'
      CurPtr = ConsumeChar(CurPtr, SizeTmp, Result);
      Kind = tok::l_brace;
    } else {
      Kind = tok::less;
    }
    break;
  case '>':
    Char = getCharAndSize(CurPtr, SizeTmp);
    if (Char == '=') {
      CurPtr = ConsumeChar(CurPtr, SizeTmp, Result);
      Kind = tok::greaterequal;
    } else if (Char == '>') {
      char After = getCharAndSize(CurPtr+SizeTmp, SizeTmp2);
      if (After == '=') {
        CurPtr = ConsumeChar(ConsumeChar(CurPtr, SizeTmp, Result),
                             SizeTmp2, Result);
        Kind = tok::greatergreaterequal;
      } else if (After == '>' && IsStartOfConflictMarker(CurPtr-1)) {
        // If this is actually a '>>>>' conflict marker, recognize it as such
        // and recover nicely.
        goto LexNextToken;
      } else if (After == '>' && HandleEndOfConflictMarker(CurPtr-1)) {
        // If this is '>>>>>>>' and we're in a conflict marker, ignore it.
        goto LexNextToken;
      } else if (LangOpts.CUDA && After == '>') {
        Kind = tok::greatergreatergreater;
        CurPtr = ConsumeChar(ConsumeChar(CurPtr, SizeTmp, Result),
                             SizeTmp2, Result);
      } else {
        CurPtr = ConsumeChar(CurPtr, SizeTmp, Result);
        Kind = tok::greatergreater;
      }
    } else {
      Kind = tok::greater;
    }
    break;
  case '^':
    Char = getCharAndSize(CurPtr, SizeTmp);
    if (Char == '=') {
      CurPtr = ConsumeChar(CurPtr, SizeTmp, Result);
      Kind = tok::caretequal;
    } else {
      Kind = tok::caret;
    }
    break;
  case '|':
    Char = getCharAndSize(CurPtr, SizeTmp);
    if (Char == '=') {
      Kind = tok::pipeequal;
      CurPtr = ConsumeChar(CurPtr, SizeTmp, Result);
    } else if (Char == '|') {
      // If this is '|||||||' and we're in a conflict marker, ignore it.
      if (CurPtr[1] == '|' && HandleEndOfConflictMarker(CurPtr-1))
        goto LexNextToken;
      Kind = tok::pipepipe;
      CurPtr = ConsumeChar(CurPtr, SizeTmp, Result);
    } else {
      Kind = tok::pipe;
    }
    break;
  case ':':
    Char = getCharAndSize(CurPtr, SizeTmp);
    if (LangOpts.Digraphs && Char == '>') {
      Kind = tok::r_square;   // ':>' -> ']'
      CurPtr = ConsumeChar(CurPtr, SizeTmp, Result);
    } else if (LangOpts.CPlusPlus && Char == ':') {
      Kind = tok::coloncolon;
      CurPtr = ConsumeChar(CurPtr, SizeTmp, Result);
    } else {
      Kind = tok::colon;
    }
    break;
  case ';':
    Kind = tok::semi;
    break;
  case '=':
    Char = getCharAndSize(CurPtr, SizeTmp);
    if (Char == '=') {
      // If this is '====' and we're in a conflict marker, ignore it.
      if (CurPtr[1] == '=' && HandleEndOfConflictMarker(CurPtr-1))
        goto LexNextToken;

      Kind = tok::equalequal;
      CurPtr = ConsumeChar(CurPtr, SizeTmp, Result);
    } else {
      Kind = tok::equal;
    }
    break;
  case ',':
    Kind = tok::comma;
    break;
  case '#':
    Char = getCharAndSize(CurPtr, SizeTmp);
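    // Example: "##" is the token-paste operator (tok::hashhash); a lone '#'
    // at the start of a line begins a directive, while elsewhere (e.g. the
    // stringize operator in a macro body) it is lexed as tok::hash.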
    if (Char == '#') {
      Kind = tok::hashhash;
      CurPtr = ConsumeChar(CurPtr, SizeTmp, Result);
    } else if (Char == '@' && LangOpts.MicrosoftExt) {  // #@ -> Charize
      Kind = tok::hashat;
      if (!isLexingRawMode())
        Diag(BufferPtr, diag::ext_charize_microsoft);
      CurPtr = ConsumeChar(CurPtr, SizeTmp, Result);
    } else {
      // We parsed a # character.  If this occurs at the start of the line,
      // it's actually the start of a preprocessing directive.  Call back to
      // the preprocessor to handle it.
      // FIXME: -fpreprocessed mode??
      if (TokAtPhysicalStartOfLine && !LexingRawMode && !Is_PragmaLexer)
        goto HandleDirective;

      Kind = tok::hash;
    }
    break;

  case '@':
    // Objective-C support.
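    // Example: in "@interface Foo", the '@' lexes as tok::at and
    // "interface" as an ordinary identifier; the parser combines them.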
    if (CurPtr[-1] == '@' && LangOpts.ObjC1)
      Kind = tok::at;
    else
      Kind = tok::unknown;
    break;

  // UCNs (C99 6.4.3, C++11 [lex.charset]p2)
  case '\\':
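    // Example: an identifier that begins with a UCN, such as "\u00E9tat",
    // reaches this case; tryReadUCN decodes \u00E9 (U+00E9) and LexUnicode
    // decides whether that code point may begin an identifier.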
    if (uint32_t CodePoint = tryReadUCN(CurPtr, BufferPtr, &Result)) {
      if (CheckUnicodeWhitespace(Result, CodePoint, CurPtr)) {
        if (SkipWhitespace(Result, CurPtr, TokAtPhysicalStartOfLine))
          return true; // KeepWhitespaceMode

        // We only saw whitespace, so just try again with this lexer.
        // (We manually eliminate the tail call to avoid recursion.)
        goto LexNextToken;
      }

      return LexUnicode(Result, CodePoint, CurPtr);
    }

    Kind = tok::unknown;
    break;

  default: {
    if (isASCII(Char)) {
      Kind = tok::unknown;
      break;
    }
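    // A raw (non-UCN) UTF-8 sequence, e.g. the bytes 0xC3 0xA9 encoding
    // U+00E9, lands here; decode it and let LexUnicode classify it.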
    UTF32 CodePoint;

    // We can't just reset CurPtr to BufferPtr because BufferPtr may point to
    // an escaped newline.
    --CurPtr;
    ConversionResult Status =
        llvm::convertUTF8Sequence((const UTF8 **)&CurPtr,
                                  (const UTF8 *)BufferEnd,
                                  &CodePoint,
                                  strictConversion);
    if (Status == conversionOK) {
      if (CheckUnicodeWhitespace(Result, CodePoint, CurPtr)) {
        if (SkipWhitespace(Result, CurPtr, TokAtPhysicalStartOfLine))
          return true; // KeepWhitespaceMode

        // We only saw whitespace, so just try again with this lexer.
        // (We manually eliminate the tail call to avoid recursion.)
        goto LexNextToken;
      }
      return LexUnicode(Result, CodePoint, CurPtr);
    }

    if (isLexingRawMode() || ParsingPreprocessorDirective ||
        PP->isPreprocessedOutput()) {
      ++CurPtr;
      Kind = tok::unknown;
      break;
    }
    // Non-ASCII characters tend to creep into source code unintentionally.
    // Instead of letting the parser complain about the unknown token,
    // just diagnose the invalid UTF-8, then drop the character.
    Diag(CurPtr, diag::err_invalid_utf8);

    BufferPtr = CurPtr+1;
    // We're pretending the character didn't exist, so just try again with
    // this lexer.
    // (We manually eliminate the tail call to avoid recursion.)
    goto LexNextToken;
  }
  }

  // Notify MIOpt that we read a non-whitespace/non-comment token.
  MIOpt.ReadToken();

  // Update the location of token as well as BufferPtr.
  FormTokenWithChars(Result, CurPtr, Kind);
  return true;

HandleDirective:
  // We parsed a # character and it's the start of a preprocessing directive.
  FormTokenWithChars(Result, CurPtr, tok::hash);
  PP->HandleDirective(Result);
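  // Note: HandleDirective consumes the remainder of the directive line
  // before returning control here.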

  if (PP->hadModuleLoaderFatalFailure()) {
    // With a fatal failure in the module loader, we abort parsing.
    assert(Result.is(tok::eof) && "Preprocessor did not set tok::eof");
    return true;
  }

  // We parsed the directive; lex a token with the new state.
  return false;
}