2006-06-18 13:43:12 +08:00
|
|
|
//===--- Preprocess.cpp - C Language Family Preprocessor Implementation ---===//
|
|
|
|
//
|
|
|
|
// The LLVM Compiler Infrastructure
|
|
|
|
//
|
2007-12-30 03:59:25 +08:00
|
|
|
// This file is distributed under the University of Illinois Open Source
|
|
|
|
// License. See LICENSE.TXT for details.
|
2006-06-18 13:43:12 +08:00
|
|
|
//
|
|
|
|
//===----------------------------------------------------------------------===//
|
|
|
|
//
|
|
|
|
// This file implements the Preprocessor interface.
|
|
|
|
//
|
|
|
|
//===----------------------------------------------------------------------===//
|
|
|
|
//
|
|
|
|
// Options to support:
|
|
|
|
// -H - Print the name of each header file used.
|
2009-02-06 14:45:26 +08:00
|
|
|
// -d[DNI] - Dump various things.
|
2006-06-18 13:43:12 +08:00
|
|
|
// -fworking-directory - #line's with preprocessor's working dir.
|
|
|
|
// -fpreprocessed
|
|
|
|
// -dependency-file,-M,-MM,-MF,-MG,-MP,-MT,-MQ,-MD,-MMD
|
|
|
|
// -W*
|
|
|
|
// -w
|
|
|
|
//
|
|
|
|
// Messages to emit:
|
|
|
|
// "Multiple include guards may be useful for:\n"
|
|
|
|
//
|
|
|
|
//===----------------------------------------------------------------------===//
|
|
|
|
|
|
|
|
#include "clang/Lex/Preprocessor.h"
|
2009-12-15 09:51:03 +08:00
|
|
|
#include "MacroArgs.h"
|
2010-01-05 03:18:44 +08:00
|
|
|
#include "clang/Lex/ExternalPreprocessorSource.h"
|
2006-10-22 15:28:56 +08:00
|
|
|
#include "clang/Lex/HeaderSearch.h"
|
2006-06-18 13:43:12 +08:00
|
|
|
#include "clang/Lex/MacroInfo.h"
|
2006-06-25 05:31:03 +08:00
|
|
|
#include "clang/Lex/Pragma.h"
|
2010-03-20 00:15:56 +08:00
|
|
|
#include "clang/Lex/PreprocessingRecord.h"
|
2006-06-28 14:49:17 +08:00
|
|
|
#include "clang/Lex/ScratchBuffer.h"
|
2009-01-29 13:15:15 +08:00
|
|
|
#include "clang/Lex/LexDiagnostic.h"
|
2010-08-25 03:08:16 +08:00
|
|
|
#include "clang/Lex/CodeCompletionHandler.h"
|
2006-06-18 13:43:12 +08:00
|
|
|
#include "clang/Basic/SourceManager.h"
|
2009-02-12 11:26:59 +08:00
|
|
|
#include "clang/Basic/FileManager.h"
|
2006-10-15 03:03:49 +08:00
|
|
|
#include "clang/Basic/TargetInfo.h"
|
2008-10-06 04:40:30 +08:00
|
|
|
#include "llvm/ADT/APFloat.h"
|
2006-07-26 14:26:52 +08:00
|
|
|
#include "llvm/ADT/SmallVector.h"
|
2007-07-16 14:48:38 +08:00
|
|
|
#include "llvm/Support/MemoryBuffer.h"
|
2009-08-23 20:08:50 +08:00
|
|
|
#include "llvm/Support/raw_ostream.h"
|
2006-06-18 13:43:12 +08:00
|
|
|
using namespace clang;
|
|
|
|
|
|
|
|
//===----------------------------------------------------------------------===//
|
2010-01-05 03:18:44 +08:00
|
|
|
// Out-of-line definition anchors ExternalPreprocessorSource's vtable here.
ExternalPreprocessorSource::~ExternalPreprocessorSource() { }
|
2006-06-18 13:43:12 +08:00
|
|
|
|
2006-10-14 15:50:21 +08:00
|
|
|
/// Construct a Preprocessor over the given source manager and header-search
/// state.  \p IILookup, if non-null, supplies lazy identifier lookup (e.g.
/// from a precompiled header); if \p OwnsHeaders is true this object takes
/// ownership of \p Headers and deletes it in the destructor.
Preprocessor::Preprocessor(Diagnostic &diags, const LangOptions &opts,
                           const TargetInfo &target, SourceManager &SM,
                           HeaderSearch &Headers,
                           IdentifierInfoLookup* IILookup,
                           bool OwnsHeaders)
  : Diags(&diags), Features(opts), Target(target),FileMgr(Headers.getFileMgr()),
    SourceMgr(SM), HeaderInfo(Headers), ExternalSource(0),
    Identifiers(opts, IILookup), BuiltinInfo(Target), CodeComplete(0),
    CodeCompletionFile(0), SkipMainFilePreamble(0, true), CurPPLexer(0),
    CurDirLookup(0), Callbacks(0), MacroArgCache(0), Record(0), MIChainHead(0),
    MICache(0) {
  // Scratch buffer used by CreateString to materialize synthesized tokens.
  ScratchBuf = new ScratchBuffer(SourceMgr);
  CounterValue = 0; // __COUNTER__ starts at 0.
  OwnsHeaderSearch = OwnsHeaders;

  // Clear stats.
  NumDirectives = NumDefined = NumUndefined = NumPragma = 0;
  NumIf = NumElse = NumEndif = 0;
  NumEnteredSourceFiles = 0;
  NumMacroExpanded = NumFnMacroExpanded = NumBuiltinMacroExpanded = 0;
  NumFastMacroExpanded = NumTokenPaste = NumFastTokenPaste = 0;
  MaxIncludeStackDepth = 0;
  NumSkipped = 0;

  // Default to discarding comments.
  KeepComments = false;
  KeepMacroComments = false;

  // Macro expansion is enabled.
  DisableMacroExpansion = false;
  InMacroArgs = false;
  NumCachedTokenLexers = 0;

  CachedLexPos = 0;

  // We haven't read anything from the external source.
  ReadMacrosFromExternalSource = false;

  // "Poison" __VA_ARGS__, which can only appear in the expansion of a macro.
  // This gets unpoisoned where it is allowed.
  (Ident__VA_ARGS__ = getIdentifierInfo("__VA_ARGS__"))->setIsPoisoned();

  // Initialize the pragma handlers.  The empty StringRef names the root
  // (anonymous) pragma namespace.
  PragmaHandlers = new PragmaNamespace(llvm::StringRef());
  RegisterBuiltinPragmas();

  // Initialize builtin macros like __LINE__ and friends.
  RegisterBuiltinMacros();
}
|
|
|
|
|
|
|
|
Preprocessor::~Preprocessor() {
  assert(BacktrackPositions.empty() && "EnableBacktrack/Backtrack imbalance!");

  // Tear down any lexers/token-lexers still on the include stack (e.g. if
  // preprocessing stopped before reaching end-of-file).
  while (!IncludeMacroStack.empty()) {
    delete IncludeMacroStack.back().TheLexer;
    delete IncludeMacroStack.back().TheTokenLexer;
    IncludeMacroStack.pop_back();
  }

  // Free any macro definitions.  The MacroInfo objects themselves live in a
  // chain of bump-allocated nodes, so only Destroy() is called, not delete.
  for (MacroInfoChain *I = MIChainHead ; I ; I = I->Next)
    I->MI.Destroy();

  // Free any cached macro expanders.
  for (unsigned i = 0, e = NumCachedTokenLexers; i != e; ++i)
    delete TokenLexerCache[i];

  // Free any cached MacroArgs.  deallocate() frees the current node and
  // returns the next node in the free list, so this walks the whole chain.
  for (MacroArgs *ArgList = MacroArgCache; ArgList; )
    ArgList = ArgList->deallocate();

  // Release pragma information.
  delete PragmaHandlers;

  // Delete the scratch buffer info.
  delete ScratchBuf;

  // Delete the header search info, if we own it.
  if (OwnsHeaderSearch)
    delete &HeaderInfo;

  delete Callbacks;
}
|
|
|
|
|
2009-02-12 11:26:59 +08:00
|
|
|
/// setPTHManager - Take ownership of the given pre-tokenized-header manager
/// and hook its stat cache into the file manager, so file 'stat' queries can
/// be answered from the PTH data instead of hitting the disk.
void Preprocessor::setPTHManager(PTHManager* pm) {
  PTH.reset(pm);
  FileMgr.addStatCache(PTH->createStatCache());
}
|
|
|
|
|
2007-07-21 00:59:19 +08:00
|
|
|
void Preprocessor::DumpToken(const Token &Tok, bool DumpFlags) const {
|
2009-08-23 20:08:50 +08:00
|
|
|
llvm::errs() << tok::getTokenName(Tok.getKind()) << " '"
|
|
|
|
<< getSpelling(Tok) << "'";
|
2009-09-09 23:08:12 +08:00
|
|
|
|
2006-06-19 00:22:51 +08:00
|
|
|
if (!DumpFlags) return;
|
2009-09-09 23:08:12 +08:00
|
|
|
|
2009-08-23 20:08:50 +08:00
|
|
|
llvm::errs() << "\t";
|
2006-06-19 00:22:51 +08:00
|
|
|
if (Tok.isAtStartOfLine())
|
2009-08-23 20:08:50 +08:00
|
|
|
llvm::errs() << " [StartOfLine]";
|
2006-06-19 00:22:51 +08:00
|
|
|
if (Tok.hasLeadingSpace())
|
2009-08-23 20:08:50 +08:00
|
|
|
llvm::errs() << " [LeadingSpace]";
|
2006-07-27 14:59:25 +08:00
|
|
|
if (Tok.isExpandDisabled())
|
2009-08-23 20:08:50 +08:00
|
|
|
llvm::errs() << " [ExpandDisabled]";
|
2006-06-19 00:22:51 +08:00
|
|
|
if (Tok.needsCleaning()) {
|
2006-06-19 00:32:35 +08:00
|
|
|
const char *Start = SourceMgr.getCharacterData(Tok.getLocation());
|
2010-08-11 22:47:12 +08:00
|
|
|
llvm::errs() << " [UnClean='" << llvm::StringRef(Start, Tok.getLength())
|
2009-08-23 20:08:50 +08:00
|
|
|
<< "']";
|
2006-06-19 00:22:51 +08:00
|
|
|
}
|
2009-09-09 23:08:12 +08:00
|
|
|
|
2009-08-23 20:08:50 +08:00
|
|
|
llvm::errs() << "\tLoc=<";
|
2007-12-10 04:31:55 +08:00
|
|
|
DumpLocation(Tok.getLocation());
|
2009-08-23 20:08:50 +08:00
|
|
|
llvm::errs() << ">";
|
2007-12-10 04:31:55 +08:00
|
|
|
}
|
|
|
|
|
|
|
|
/// DumpLocation - Print \p Loc to llvm::errs() via SourceLocation::dump,
/// using this preprocessor's source manager to decode it.
void Preprocessor::DumpLocation(SourceLocation Loc) const {
  Loc.dump(SourceMgr);
}
|
|
|
|
|
|
|
|
void Preprocessor::DumpMacro(const MacroInfo &MI) const {
|
2009-08-23 20:08:50 +08:00
|
|
|
llvm::errs() << "MACRO: ";
|
2006-06-19 00:22:51 +08:00
|
|
|
for (unsigned i = 0, e = MI.getNumTokens(); i != e; ++i) {
|
|
|
|
DumpToken(MI.getReplacementToken(i));
|
2009-08-23 20:08:50 +08:00
|
|
|
llvm::errs() << " ";
|
2006-06-19 00:22:51 +08:00
|
|
|
}
|
2009-08-23 20:08:50 +08:00
|
|
|
llvm::errs() << "\n";
|
2006-06-19 00:22:51 +08:00
|
|
|
}
|
|
|
|
|
2006-06-18 13:43:12 +08:00
|
|
|
void Preprocessor::PrintStats() {
|
2009-08-23 20:08:50 +08:00
|
|
|
llvm::errs() << "\n*** Preprocessor Stats:\n";
|
|
|
|
llvm::errs() << NumDirectives << " directives found:\n";
|
|
|
|
llvm::errs() << " " << NumDefined << " #define.\n";
|
|
|
|
llvm::errs() << " " << NumUndefined << " #undef.\n";
|
|
|
|
llvm::errs() << " #include/#include_next/#import:\n";
|
|
|
|
llvm::errs() << " " << NumEnteredSourceFiles << " source files entered.\n";
|
|
|
|
llvm::errs() << " " << MaxIncludeStackDepth << " max include stack depth\n";
|
|
|
|
llvm::errs() << " " << NumIf << " #if/#ifndef/#ifdef.\n";
|
|
|
|
llvm::errs() << " " << NumElse << " #else/#elif.\n";
|
|
|
|
llvm::errs() << " " << NumEndif << " #endif.\n";
|
|
|
|
llvm::errs() << " " << NumPragma << " #pragma.\n";
|
|
|
|
llvm::errs() << NumSkipped << " #if/#ifndef#ifdef regions skipped\n";
|
|
|
|
|
|
|
|
llvm::errs() << NumMacroExpanded << "/" << NumFnMacroExpanded << "/"
|
2008-01-15 00:44:48 +08:00
|
|
|
<< NumBuiltinMacroExpanded << " obj/fn/builtin macros expanded, "
|
|
|
|
<< NumFastMacroExpanded << " on the fast path.\n";
|
2009-08-23 20:08:50 +08:00
|
|
|
llvm::errs() << (NumFastTokenPaste+NumTokenPaste)
|
2008-01-15 00:44:48 +08:00
|
|
|
<< " token paste (##) operations performed, "
|
|
|
|
<< NumFastTokenPaste << " on the fast path.\n";
|
2006-06-18 13:43:12 +08:00
|
|
|
}
|
|
|
|
|
2010-03-13 18:17:05 +08:00
|
|
|
/// macro_begin - Begin iteration over the macro table.  If
/// \p IncludeExternalMacros is set, first force deserialization of any
/// macros held by the external source (e.g. a precompiled header) so they
/// appear in the table; this is done at most once.
Preprocessor::macro_iterator
Preprocessor::macro_begin(bool IncludeExternalMacros) const {
  const bool NeedExternalMacros =
    IncludeExternalMacros && ExternalSource && !ReadMacrosFromExternalSource;
  if (NeedExternalMacros) {
    ReadMacrosFromExternalSource = true;
    ExternalSource->ReadDefinedMacros();
  }

  return Macros.begin();
}
|
|
|
|
|
2010-03-13 18:17:05 +08:00
|
|
|
/// macro_end - End iterator for the macro table.  If
/// \p IncludeExternalMacros is set, first force deserialization of any
/// macros held by the external source (e.g. a precompiled header) so the
/// iteration range is complete; this is done at most once.
Preprocessor::macro_iterator
Preprocessor::macro_end(bool IncludeExternalMacros) const {
  const bool NeedExternalMacros =
    IncludeExternalMacros && ExternalSource && !ReadMacrosFromExternalSource;
  if (NeedExternalMacros) {
    ReadMacrosFromExternalSource = true;
    ExternalSource->ReadDefinedMacros();
  }

  return Macros.end();
}
|
|
|
|
|
2010-03-13 18:17:05 +08:00
|
|
|
/// SetCodeCompletionPoint - Truncate the contents of \p File just before
/// (\p TruncateAtLine, \p TruncateAtColumn), both 1-based, so that lexing
/// stops at the code-completion point.  Passing a null \p File clears any
/// previously set completion point.  Returns true on failure (the file's
/// contents could not be loaded); false on success.
bool Preprocessor::SetCodeCompletionPoint(const FileEntry *File,
                                          unsigned TruncateAtLine,
                                          unsigned TruncateAtColumn) {
  using llvm::MemoryBuffer;

  CodeCompletionFile = File;

  // Okay to clear out the code-completion point by passing NULL.
  if (!CodeCompletionFile)
    return false;

  // Load the actual file's contents.
  bool Invalid = false;
  const MemoryBuffer *Buffer = SourceMgr.getMemoryBufferForFile(File, &Invalid);
  if (Invalid)
    return true;

  // Find the byte position of the truncation point by walking line by line.
  const char *Position = Buffer->getBufferStart();
  for (unsigned Line = 1; Line < TruncateAtLine; ++Line) {
    // Scan to the end of the current line; *Position == 0 is the buffer's
    // NUL terminator, which MemoryBuffer guarantees is present.
    for (; *Position; ++Position) {
      if (*Position != '\r' && *Position != '\n')
        continue;

      // Eat \r\n or \n\r as a single line.
      // (Reading Position[1] is safe thanks to the NUL terminator.)
      if ((Position[1] == '\r' || Position[1] == '\n') &&
          Position[0] != Position[1])
        ++Position;
      ++Position;
      break;
    }
  }

  // Advance to the requested column (1-based, hence the -1).
  Position += TruncateAtColumn - 1;

  // Truncate the buffer.  If the computed point is at or past the end of the
  // buffer there is nothing to cut off, so the contents are left untouched.
  if (Position < Buffer->getBufferEnd()) {
    llvm::StringRef Data(Buffer->getBufferStart(),
                         Position-Buffer->getBufferStart());
    MemoryBuffer *TruncatedBuffer
      = MemoryBuffer::getMemBufferCopy(Data, Buffer->getBufferIdentifier());
    SourceMgr.overrideFileContents(File, TruncatedBuffer);
  }

  return false;
}
|
|
|
|
|
2009-12-04 01:05:59 +08:00
|
|
|
/// isCodeCompletionFile - Return true when \p FileLoc is a file location
/// inside the file registered as the code-completion file.
bool Preprocessor::isCodeCompletionFile(SourceLocation FileLoc) const {
  // No completion point registered, or the location is not a plain file
  // location (e.g. a macro-expansion location).
  if (!CodeCompletionFile || !FileLoc.isFileID())
    return false;

  // Compare against the underlying file entry of the location's FileID.
  const FileEntry *FE =
    SourceMgr.getFileEntryForID(SourceMgr.getFileID(FileLoc));
  return FE == CodeCompletionFile;
}
|
|
|
|
|
2010-08-26 01:04:25 +08:00
|
|
|
/// CodeCompleteNaturalLanguage - Invoked when the completion point lands in
/// a natural-language region.  Clears the pending completion point,
/// suppresses all further diagnostics, and notifies the completion
/// consumer, if one is installed.
void Preprocessor::CodeCompleteNaturalLanguage() {
  // A null file clears the code-completion point (see SetCodeCompletionPoint).
  SetCodeCompletionPoint(0, 0, 0);
  getDiagnostics().setSuppressAllDiagnostics(true);
  if (CodeComplete)
    CodeComplete->CodeCompleteNaturalLanguage();
}
|
|
|
|
|
2006-06-19 00:22:51 +08:00
|
|
|
//===----------------------------------------------------------------------===//
|
|
|
|
// Token Spelling
|
|
|
|
//===----------------------------------------------------------------------===//
|
|
|
|
|
|
|
|
/// getSpelling() - Return the 'spelling' of this token. The spelling of a
|
|
|
|
/// token are the characters used to represent the token in the source file
|
|
|
|
/// after trigraph expansion and escaped-newline folding. In particular, this
|
|
|
|
/// wants to get the true, uncanonicalized, spelling of things like digraphs
|
|
|
|
/// UCNs, etc.
|
2009-11-14 09:20:48 +08:00
|
|
|
std::string Preprocessor::getSpelling(const Token &Tok,
|
|
|
|
const SourceManager &SourceMgr,
|
2010-03-16 13:20:39 +08:00
|
|
|
const LangOptions &Features,
|
|
|
|
bool *Invalid) {
|
2006-06-19 00:22:51 +08:00
|
|
|
assert((int)Tok.getLength() >= 0 && "Token character range is bogus!");
|
2009-01-27 08:01:05 +08:00
|
|
|
|
2006-06-19 00:22:51 +08:00
|
|
|
// If this token contains nothing interesting, return it directly.
|
2010-03-16 13:20:39 +08:00
|
|
|
bool CharDataInvalid = false;
|
|
|
|
const char* TokStart = SourceMgr.getCharacterData(Tok.getLocation(),
|
|
|
|
&CharDataInvalid);
|
|
|
|
if (Invalid)
|
|
|
|
*Invalid = CharDataInvalid;
|
|
|
|
if (CharDataInvalid)
|
|
|
|
return std::string();
|
|
|
|
|
2006-06-19 00:22:51 +08:00
|
|
|
if (!Tok.needsCleaning())
|
|
|
|
return std::string(TokStart, TokStart+Tok.getLength());
|
2009-09-09 23:08:12 +08:00
|
|
|
|
2006-06-19 00:22:51 +08:00
|
|
|
std::string Result;
|
|
|
|
Result.reserve(Tok.getLength());
|
2009-09-09 23:08:12 +08:00
|
|
|
|
2006-07-05 06:33:12 +08:00
|
|
|
// Otherwise, hard case, relex the characters into the string.
|
2006-06-19 00:22:51 +08:00
|
|
|
for (const char *Ptr = TokStart, *End = TokStart+Tok.getLength();
|
|
|
|
Ptr != End; ) {
|
|
|
|
unsigned CharSize;
|
|
|
|
Result.push_back(Lexer::getCharAndSizeNoWarn(Ptr, CharSize, Features));
|
|
|
|
Ptr += CharSize;
|
|
|
|
}
|
|
|
|
assert(Result.size() != unsigned(Tok.getLength()) &&
|
|
|
|
"NeedsCleaning flag set on something that didn't need cleaning!");
|
|
|
|
return Result;
|
|
|
|
}
|
|
|
|
|
2009-11-14 09:20:48 +08:00
|
|
|
/// getSpelling() - Return the 'spelling' of this token.  The spelling of a
/// token are the characters used to represent the token in the source file
/// after trigraph expansion and escaped-newline folding.  In particular, this
/// wants to get the true, uncanonicalized, spelling of things like digraphs
/// UCNs, etc.
std::string Preprocessor::getSpelling(const Token &Tok, bool *Invalid) const {
  // Delegate to the static overload with this preprocessor's own source
  // manager and language options.
  return getSpelling(Tok, SourceMgr, Features, Invalid);
}
|
|
|
|
|
2006-06-19 00:22:51 +08:00
|
|
|
/// getSpelling - This method is used to get the spelling of a token into a
/// preallocated buffer, instead of as an std::string.  The caller is required
/// to allocate enough space for the token, which is guaranteed to be at least
/// Tok.getLength() bytes long.  The actual length of the token is returned.
///
/// Note that this method may do two possible things: it may either fill in
/// the buffer specified with characters, or it may *change the input pointer*
/// to point to a constant buffer with the data already in it (avoiding a
/// copy).  The caller is not allowed to modify the returned buffer pointer
/// if an internal buffer is returned.
unsigned Preprocessor::getSpelling(const Token &Tok,
                                   const char *&Buffer, bool *Invalid) const {
  assert((int)Tok.getLength() >= 0 && "Token character range is bogus!");

  // If this token is an identifier, just return the string from the identifier
  // table, which is very quick.
  if (const IdentifierInfo *II = Tok.getIdentifierInfo()) {
    Buffer = II->getNameStart();
    return II->getLength();
  }

  // Otherwise, compute the start of the token in the input lexer buffer.
  const char *TokStart = 0;

  // Literal tokens cache a pointer to their text, avoiding a source-manager
  // lookup below.
  if (Tok.isLiteral())
    TokStart = Tok.getLiteralData();

  if (TokStart == 0) {
    // Fall back to locating the token text via its source location.
    bool CharDataInvalid = false;
    TokStart = SourceMgr.getCharacterData(Tok.getLocation(), &CharDataInvalid);
    if (Invalid)
      *Invalid = CharDataInvalid;
    if (CharDataInvalid) {
      Buffer = "";
      return 0;
    }
  }

  // If this token contains nothing interesting, return it directly.
  if (!Tok.needsCleaning()) {
    // Point the caller at the constant internal buffer (no copy made).
    Buffer = TokStart;
    return Tok.getLength();
  }

  // Otherwise, hard case, relex the characters into the string.
  char *OutBuf = const_cast<char*>(Buffer);
  for (const char *Ptr = TokStart, *End = TokStart+Tok.getLength();
       Ptr != End; ) {
    unsigned CharSize;
    *OutBuf++ = Lexer::getCharAndSizeNoWarn(Ptr, CharSize, Features);
    Ptr += CharSize;
  }
  // Cleaning always shrinks the token, so the lengths must differ here.
  assert(unsigned(OutBuf-Buffer) != Tok.getLength() &&
         "NeedsCleaning flag set on something that didn't need cleaning!");

  return OutBuf-Buffer;
}
|
|
|
|
|
2010-02-28 01:05:45 +08:00
|
|
|
/// getSpelling - This method is used to get the spelling of a token into a
|
|
|
|
/// SmallVector. Note that the returned StringRef may not point to the
|
|
|
|
/// supplied buffer if a copy can be avoided.
|
|
|
|
llvm::StringRef Preprocessor::getSpelling(const Token &Tok,
|
2010-03-16 13:20:39 +08:00
|
|
|
llvm::SmallVectorImpl<char> &Buffer,
|
|
|
|
bool *Invalid) const {
|
2010-02-28 01:05:45 +08:00
|
|
|
// Try the fast path.
|
|
|
|
if (const IdentifierInfo *II = Tok.getIdentifierInfo())
|
|
|
|
return II->getName();
|
|
|
|
|
|
|
|
// Resize the buffer if we need to copy into it.
|
|
|
|
if (Tok.needsCleaning())
|
|
|
|
Buffer.resize(Tok.getLength());
|
|
|
|
|
|
|
|
const char *Ptr = Buffer.data();
|
2010-03-16 13:20:39 +08:00
|
|
|
unsigned Len = getSpelling(Tok, Ptr, Invalid);
|
2010-02-28 01:05:45 +08:00
|
|
|
return llvm::StringRef(Ptr, Len);
|
|
|
|
}
|
|
|
|
|
2006-07-14 14:54:10 +08:00
|
|
|
/// CreateString - Plop the specified string into a scratch buffer and return a
/// location for it.  If specified, the source location provides a source
/// location for the token.
void Preprocessor::CreateString(const char *Buf, unsigned Len, Token &Tok,
                                SourceLocation InstantiationLoc) {
  Tok.setLength(Len);

  // Copy the text into the scratch buffer; DestPtr receives the address of
  // the stable copy.
  const char *DestPtr;
  SourceLocation Loc = ScratchBuf->getToken(Buf, Len, DestPtr);

  // If the caller supplied a location, record the scratch-buffer location as
  // an instantiation of it so diagnostics point at the original source.
  if (InstantiationLoc.isValid())
    Loc = SourceMgr.createInstantiationLoc(Loc, InstantiationLoc,
                                           InstantiationLoc, Len);
  Tok.setLocation(Loc);

  // If this is a literal token, set the pointer data.
  if (Tok.isLiteral())
    Tok.setLiteralData(DestPtr);
}
|
|
|
|
|
|
|
|
|
2007-07-16 14:48:38 +08:00
|
|
|
/// AdvanceToTokenCharacter - Given a location that specifies the start of a
/// token, return a new location that specifies a character within the token.
/// \p CharNo is the logical character index within the token (0 == first
/// character).
SourceLocation Preprocessor::AdvanceToTokenCharacter(SourceLocation TokStart,
                                                     unsigned CharNo) {
  // Figure out how many physical characters away the specified instantiation
  // character is.  This needs to take into consideration newlines and
  // trigraphs.
  bool Invalid = false;
  const char *TokPtr = SourceMgr.getCharacterData(TokStart, &Invalid);

  // If they request the first char of the token, we're trivially done.  (We
  // also bail out, returning the start, if the text couldn't be loaded.)
  if (Invalid || (CharNo == 0 && Lexer::isObviouslySimpleCharacter(*TokPtr)))
    return TokStart;

  unsigned PhysOffset = 0;

  // The usual case is that tokens don't contain anything interesting.  Skip
  // over the uninteresting characters.  If a token only consists of simple
  // chars, this method is extremely fast.
  while (Lexer::isObviouslySimpleCharacter(*TokPtr)) {
    if (CharNo == 0)
      return TokStart.getFileLocWithOffset(PhysOffset);
    ++TokPtr, --CharNo, ++PhysOffset;
  }

  // If we have a character that may be a trigraph or escaped newline, use a
  // lexer to parse it correctly.  Each logical character may span multiple
  // physical bytes, which Size reports.
  for (; CharNo; --CharNo) {
    unsigned Size;
    Lexer::getCharAndSizeNoWarn(TokPtr, Size, Features);
    TokPtr += Size;
    PhysOffset += Size;
  }

  // Final detail: if we end up on an escaped newline, we want to return the
  // location of the actual byte of the token.  For example foo\<newline>bar
  // advanced by 3 should return the location of b, not of \\.  One compounding
  // detail of this is that the escape may be made by a trigraph.
  if (!Lexer::isObviouslySimpleCharacter(*TokPtr))
    PhysOffset += Lexer::SkipEscapedNewLines(TokPtr)-TokPtr;

  return TokStart.getFileLocWithOffset(PhysOffset);
}
|
|
|
|
|
2010-03-13 18:17:05 +08:00
|
|
|
/// getLocForEndOfToken - Given the start location of a token, return a
/// location \p Offset characters before its end.  Returns an invalid
/// location for invalid or macro locations, and \p Loc itself when the
/// token is not longer than \p Offset.
SourceLocation Preprocessor::getLocForEndOfToken(SourceLocation Loc,
                                                 unsigned Offset) {
  // Only plain file locations can be measured.
  if (!Loc.isFileID() || Loc.isInvalid())
    return SourceLocation();

  const unsigned TokLen =
    Lexer::MeasureTokenLength(Loc, getSourceManager(), Features);
  if (TokLen <= Offset)
    return Loc;

  return AdvanceToTokenCharacter(Loc, TokLen - Offset);
}
|
|
|
|
|
|
|
|
|
2007-07-16 14:48:38 +08:00
|
|
|
|
2007-10-10 06:10:18 +08:00
|
|
|
//===----------------------------------------------------------------------===//
|
|
|
|
// Preprocessor Initialization Methods
|
|
|
|
//===----------------------------------------------------------------------===//
|
|
|
|
|
|
|
|
|
|
|
|
/// EnterMainSourceFile - Enter the specified FileID as the main source file,
/// which implicitly adds the builtin defines etc.
void Preprocessor::EnterMainSourceFile() {
  // We do not allow the preprocessor to reenter the main file.  Doing so will
  // cause FileID's to accumulate information from both runs (e.g. #line
  // information) and predefined macros aren't guaranteed to be set properly.
  assert(NumEnteredSourceFiles == 0 && "Cannot reenter the main file!");
  FileID MainFileID = SourceMgr.getMainFileID();

  // Enter the main file source buffer.
  EnterSourceFile(MainFileID, 0, SourceLocation());

  // If we've been asked to skip bytes in the main file (e.g., as part of a
  // precompiled preamble), do so now.
  if (SkipMainFilePreamble.first > 0)
    CurLexer->SkipBytes(SkipMainFilePreamble.first,
                        SkipMainFilePreamble.second);

  // Tell the header info that the main file was entered.  If the file is later
  // #imported, it won't be re-entered.
  if (const FileEntry *FE = SourceMgr.getFileEntryForID(MainFileID))
    HeaderInfo.IncrementIncludeCount(FE);

  // Preprocess Predefines to populate the initial preprocessor state.  The
  // predefines are lexed from their own in-memory "<built-in>" buffer.
  llvm::MemoryBuffer *SB =
    llvm::MemoryBuffer::getMemBufferCopy(Predefines, "<built-in>");
  assert(SB && "Cannot create predefined source buffer");
  FileID FID = SourceMgr.createFileIDForMemBuffer(SB);
  assert(!FID.isInvalid() && "Could not create FileID for predefines?");

  // Start parsing the predefines.
  EnterSourceFile(FID, 0, SourceLocation());
}
|
2007-07-16 14:48:38 +08:00
|
|
|
|
2010-03-23 13:09:10 +08:00
|
|
|
/// EndSourceFile - Inform the preprocessor callbacks (if any) that the end
/// of the main source file has been reached.
void Preprocessor::EndSourceFile() {
  // Notify the client that we reached the end of the source file.
  if (Callbacks)
    Callbacks->EndOfMainFile();
}
|
2006-06-28 13:26:32 +08:00
|
|
|
|
|
|
|
//===----------------------------------------------------------------------===//
|
|
|
|
// Lexer Event Handling.
|
|
|
|
//===----------------------------------------------------------------------===//
|
|
|
|
|
2006-07-08 16:28:12 +08:00
|
|
|
/// LookUpIdentifierInfo - Given a tok::identifier token, look up the
|
|
|
|
/// identifier information for the token and install it into the token.
|
2007-07-21 00:59:19 +08:00
|
|
|
IdentifierInfo *Preprocessor::LookUpIdentifierInfo(Token &Identifier,
|
2009-11-05 09:53:52 +08:00
|
|
|
const char *BufPtr) const {
|
2007-10-10 02:02:16 +08:00
|
|
|
assert(Identifier.is(tok::identifier) && "Not an identifier!");
|
2006-07-08 16:28:12 +08:00
|
|
|
assert(Identifier.getIdentifierInfo() == 0 && "Identinfo already exists!");
|
2009-09-09 23:08:12 +08:00
|
|
|
|
2006-07-08 16:28:12 +08:00
|
|
|
// Look up this token, see if it is a macro, or if it is a language keyword.
|
|
|
|
IdentifierInfo *II;
|
|
|
|
if (BufPtr && !Identifier.needsCleaning()) {
|
|
|
|
// No cleaning needed, just use the characters from the lexed buffer.
|
2009-11-05 09:53:39 +08:00
|
|
|
II = getIdentifierInfo(llvm::StringRef(BufPtr, Identifier.getLength()));
|
2006-07-08 16:28:12 +08:00
|
|
|
} else {
|
|
|
|
// Cleaning needed, alloca a buffer, clean into it, then use the buffer.
|
2010-03-13 18:17:05 +08:00
|
|
|
llvm::SmallString<64> IdentifierBuffer;
|
2010-02-27 21:44:12 +08:00
|
|
|
llvm::StringRef CleanedStr = getSpelling(Identifier, IdentifierBuffer);
|
|
|
|
II = getIdentifierInfo(CleanedStr);
|
2006-07-08 16:28:12 +08:00
|
|
|
}
|
2006-10-14 13:19:21 +08:00
|
|
|
Identifier.setIdentifierInfo(II);
|
2006-07-08 16:28:12 +08:00
|
|
|
return II;
|
|
|
|
}
|
|
|
|
|
|
|
|
|
2006-06-28 13:26:32 +08:00
|
|
|
/// HandleIdentifier - This callback is invoked when the lexer reads an
/// identifier.  This callback looks up the identifier in the map and/or
/// potentially macro expands it or turns it into a named token (like 'for').
///
/// Note that callers of this method are guarded by checking the
/// IdentifierInfo's 'isHandleIdentifierCase' bit.  If this method changes, the
/// IdentifierInfo methods that compute these properties will need to change to
/// match.
void Preprocessor::HandleIdentifier(Token &Identifier) {
  assert(Identifier.getIdentifierInfo() &&
         "Can't handle identifiers without identifier info!");

  IdentifierInfo &II = *Identifier.getIdentifierInfo();

  // If this identifier was poisoned, and if it was not produced from a macro
  // expansion, emit an error.  (CurPPLexer is null while lexing from a macro
  // token stream, which is how we suppress the diagnostic in that case.)
  if (II.isPoisoned() && CurPPLexer) {
    if (&II != Ident__VA_ARGS__) // We warn about __VA_ARGS__ with poisoning.
      Diag(Identifier, diag::err_pp_used_poisoned_id);
    else
      Diag(Identifier, diag::ext_pp_bad_vaargs_use);
  }

  // If this is a macro to be expanded, do it.  A false return from
  // HandleMacroExpandedIdentifier means there is no token for this method to
  // keep processing, so we return immediately.
  if (MacroInfo *MI = getMacroInfo(&II)) {
    if (!DisableMacroExpansion && !Identifier.isExpandDisabled()) {
      if (MI->isEnabled()) {
        if (!HandleMacroExpandedIdentifier(Identifier, MI))
          return;
      } else {
        // C99 6.10.3.4p2 says that a disabled macro may never again be
        // expanded, even if it's in a context where it could be expanded in the
        // future.
        Identifier.setFlag(Token::DisableExpand);
      }
    }
  }

  // C++ 2.11p2: If this is an alternative representation of a C++ operator,
  // then we act as if it is the actual operator and not the textual
  // representation of it.  Clearing the IdentifierInfo makes the token behave
  // as the punctuator rather than an identifier downstream.
  if (II.isCPlusPlusOperatorKeyword())
    Identifier.setIdentifierInfo(0);

  // If this is an extension token, diagnose its use.
  // We avoid diagnosing tokens that originate from macro definitions.
  // FIXME: This warning is disabled in cases where it shouldn't be,
  // like "#define TY typeof", "TY(1) x".
  if (II.isExtensionToken() && !DisableMacroExpansion)
    Diag(Identifier, diag::ext_token_used);
}
|
Add support for retrieving the Doxygen comment associated with a given
declaration in the AST.
The new ASTContext::getCommentForDecl function searches for a comment
that is attached to the given declaration, and returns that comment,
which may be composed of several comment blocks.
Comments are always available in an AST. However, to avoid harming
performance, we don't actually parse the comments. Rather, we keep the
source ranges of all of the comments within a large, sorted vector,
then lazily extract comments via a binary search in that vector only
when needed (which never occurs in a "normal" compile).
Comments are written to a precompiled header/AST file as a blob of
source ranges. That blob is only lazily loaded when one requests a
comment for a declaration (this never occurs in a "normal" compile).
The indexer testbed now supports comment extraction. When the
-point-at location points to a declaration with a Doxygen-style
comment, the indexer testbed prints the associated comment
block(s). See test/Index/comments.c for an example.
Some notes:
- We don't actually attempt to parse the comment blocks themselves,
beyond identifying them as Doxygen comment blocks to associate them
with a declaration.
- We won't find comment blocks that aren't adjacent to the
declaration, because we start our search based on the location of
the declaration.
- We don't go through the necessary hops to find, for example,
whether some redeclaration of a declaration has comments when our
current declaration does not. Similarly, we don't attempt to
associate a \param Foo marker in a function body comment with the
parameter named Foo (although that is certainly possible).
- Verification of my "no performance impact" claims is still "to be
done".
llvm-svn: 74704
2009-07-03 01:08:52 +08:00
|
|
|
|
|
|
|
/// AddCommentHandler - Register the given handler to be notified of comments
/// encountered during preprocessing.  A handler may be registered only once.
void Preprocessor::AddCommentHandler(CommentHandler *Handler) {
  assert(Handler && "NULL comment handler");
  // Guard against double registration (debug builds only).
  std::vector<CommentHandler *>::iterator Existing =
      std::find(CommentHandlers.begin(), CommentHandlers.end(), Handler);
  assert(Existing == CommentHandlers.end() &&
         "Comment handler already registered");
  (void)Existing; // Avoid an unused-variable warning when asserts are off.
  CommentHandlers.push_back(Handler);
}
|
|
|
|
|
|
|
|
/// RemoveCommentHandler - Unregister a previously-registered comment handler.
/// The handler must currently be registered.
void Preprocessor::RemoveCommentHandler(CommentHandler *Handler) {
  typedef std::vector<CommentHandler *>::iterator HandlerIter;
  // Locate the handler with a simple linear scan; the handler list is small.
  HandlerIter It = CommentHandlers.begin(), End = CommentHandlers.end();
  while (It != End && *It != Handler)
    ++It;
  assert(It != End && "Comment handler not registered");
  CommentHandlers.erase(It);
}
|
|
|
|
|
2010-01-19 06:35:47 +08:00
|
|
|
/// HandleComment - Notify every registered CommentHandler about \p Comment.
/// Returns true if a handler produced pending tokens and \p result was
/// overwritten with the next lexed token; returns false if the caller should
/// keep the comment token it already has.
bool Preprocessor::HandleComment(Token &result, SourceRange Comment) {
  // Give each handler a chance to act on the comment; remember whether any
  // of them queued up tokens to be lexed.
  bool PendingTokens = false;
  typedef std::vector<CommentHandler *>::iterator HandlerIter;
  for (HandlerIter It = CommentHandlers.begin(), E = CommentHandlers.end();
       It != E; ++It)
    if ((*It)->HandleComment(*this, Comment))
      PendingTokens = true;

  // With no pending tokens, or when comments are retained as real tokens,
  // the current comment token stands as-is.
  if (!PendingTokens || getCommentRetentionState())
    return false;

  // Otherwise, replace the comment token with the next token in the stream.
  Lex(result);
  return true;
}
|
|
|
|
|
|
|
|
// Out-of-line definition of CommentHandler's destructor.  NOTE(review):
// presumably this anchors the class's vtable in this translation unit (the
// usual LLVM idiom) — confirm against the declaration in Preprocessor.h.
CommentHandler::~CommentHandler() { }
|
2010-03-20 00:15:56 +08:00
|
|
|
|
2010-08-25 03:08:16 +08:00
|
|
|
// Out-of-line definition of CodeCompletionHandler's destructor.  NOTE(review):
// presumably an anchor for the vtable — confirm against
// clang/Lex/CodeCompletionHandler.h.
CodeCompletionHandler::~CodeCompletionHandler() { }
|
|
|
|
|
2010-03-20 00:15:56 +08:00
|
|
|
void Preprocessor::createPreprocessingRecord() {
|
|
|
|
if (Record)
|
|
|
|
return;
|
|
|
|
|
2010-03-20 01:12:43 +08:00
|
|
|
Record = new PreprocessingRecord;
|
|
|
|
addPPCallbacks(Record);
|
2010-03-20 00:15:56 +08:00
|
|
|
}
|