//===- Preprocessor.cpp - C Language Family Preprocessor Implementation --===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file implements the Preprocessor interface.
//
//===----------------------------------------------------------------------===//
//
// Options to support:
//   -H       - Print the name of each header file used.
//   -d[DNI]  - Dump various things.
//   -fworking-directory - #line's with preprocessor's working dir.
//   -fpreprocessed
//   -dependency-file,-M,-MM,-MF,-MG,-MP,-MT,-MQ,-MD,-MMD
//   -W*
//   -w
//
// Messages to emit:
//   "Multiple include guards may be useful for:\n"
//
//===----------------------------------------------------------------------===//

#include "clang/Lex/Preprocessor.h"
|
2012-12-04 17:13:33 +08:00
|
|
|
#include "clang/Basic/FileManager.h"
|
2014-08-12 05:29:24 +08:00
|
|
|
#include "clang/Basic/FileSystemStatCache.h"
|
2017-12-05 07:16:21 +08:00
|
|
|
#include "clang/Basic/IdentifierTable.h"
|
|
|
|
#include "clang/Basic/LLVM.h"
|
|
|
|
#include "clang/Basic/LangOptions.h"
|
|
|
|
#include "clang/Basic/Module.h"
|
|
|
|
#include "clang/Basic/SourceLocation.h"
|
2012-12-04 17:13:33 +08:00
|
|
|
#include "clang/Basic/SourceManager.h"
|
|
|
|
#include "clang/Basic/TargetInfo.h"
|
|
|
|
#include "clang/Lex/CodeCompletionHandler.h"
|
2010-01-05 03:18:44 +08:00
|
|
|
#include "clang/Lex/ExternalPreprocessorSource.h"
|
2006-10-22 15:28:56 +08:00
|
|
|
#include "clang/Lex/HeaderSearch.h"
|
2012-12-04 17:13:33 +08:00
|
|
|
#include "clang/Lex/LexDiagnostic.h"
|
2017-12-05 07:16:21 +08:00
|
|
|
#include "clang/Lex/Lexer.h"
|
2012-12-04 17:13:33 +08:00
|
|
|
#include "clang/Lex/LiteralSupport.h"
|
2014-01-07 19:51:46 +08:00
|
|
|
#include "clang/Lex/MacroArgs.h"
|
2006-06-18 13:43:12 +08:00
|
|
|
#include "clang/Lex/MacroInfo.h"
|
2012-12-04 17:13:33 +08:00
|
|
|
#include "clang/Lex/ModuleLoader.h"
|
2006-06-25 05:31:03 +08:00
|
|
|
#include "clang/Lex/Pragma.h"
|
2010-03-20 00:15:56 +08:00
|
|
|
#include "clang/Lex/PreprocessingRecord.h"
|
2017-12-05 07:16:21 +08:00
|
|
|
#include "clang/Lex/PreprocessorLexer.h"
|
2012-12-04 17:13:33 +08:00
|
|
|
#include "clang/Lex/PreprocessorOptions.h"
|
2006-06-28 14:49:17 +08:00
|
|
|
#include "clang/Lex/ScratchBuffer.h"
|
2017-12-05 07:16:21 +08:00
|
|
|
#include "clang/Lex/Token.h"
|
|
|
|
#include "clang/Lex/TokenLexer.h"
|
2016-09-08 05:53:17 +08:00
|
|
|
#include "llvm/ADT/APInt.h"
|
2017-12-05 07:16:21 +08:00
|
|
|
#include "llvm/ADT/ArrayRef.h"
|
2016-09-08 05:53:17 +08:00
|
|
|
#include "llvm/ADT/DenseMap.h"
|
2014-01-07 19:51:46 +08:00
|
|
|
#include "llvm/ADT/SmallString.h"
|
2016-09-08 05:53:17 +08:00
|
|
|
#include "llvm/ADT/SmallVector.h"
|
|
|
|
#include "llvm/ADT/STLExtras.h"
|
|
|
|
#include "llvm/ADT/StringRef.h"
|
2016-07-19 03:02:11 +08:00
|
|
|
#include "llvm/ADT/StringSwitch.h"
|
2012-12-04 17:13:33 +08:00
|
|
|
#include "llvm/Support/Capacity.h"
|
2016-09-08 05:53:17 +08:00
|
|
|
#include "llvm/Support/ErrorHandling.h"
|
2007-07-16 14:48:38 +08:00
|
|
|
#include "llvm/Support/MemoryBuffer.h"
|
2009-08-23 20:08:50 +08:00
|
|
|
#include "llvm/Support/raw_ostream.h"
|
2016-09-08 05:53:17 +08:00
|
|
|
#include <algorithm>
|
|
|
|
#include <cassert>
|
|
|
|
#include <memory>
|
|
|
|
#include <string>
|
2016-05-27 22:27:13 +08:00
|
|
|
#include <utility>
|
2016-09-08 05:53:17 +08:00
|
|
|
#include <vector>
|
|
|
|
|
2006-06-18 13:43:12 +08:00
|
|
|
using namespace clang;
|
|
|
|
|
2016-08-05 19:01:08 +08:00
|
|
|
LLVM_INSTANTIATE_REGISTRY(PragmaHandlerRegistry)
|
2016-04-04 23:30:44 +08:00
|
|
|
|
2017-12-05 07:16:21 +08:00
|
|
|
ExternalPreprocessorSource::~ExternalPreprocessorSource() = default;
|
2006-06-18 13:43:12 +08:00
|
|
|
|
2017-01-06 03:11:36 +08:00
|
|
|
Preprocessor::Preprocessor(std::shared_ptr<PreprocessorOptions> PPOpts,
                           DiagnosticsEngine &diags, LangOptions &opts,
                           SourceManager &SM, MemoryBufferCache &PCMCache,
                           HeaderSearch &Headers, ModuleLoader &TheModuleLoader,
                           IdentifierInfoLookup *IILookup, bool OwnsHeaders,
                           TranslationUnitKind TUKind)
    : PPOpts(std::move(PPOpts)), Diags(&diags), LangOpts(opts),
      FileMgr(Headers.getFileMgr()), SourceMgr(SM), PCMCache(PCMCache),
      ScratchBuf(new ScratchBuffer(SourceMgr)), HeaderInfo(Headers),
      TheModuleLoader(TheModuleLoader), ExternalSource(nullptr),
      // As the language options may have not been loaded yet (when
      // deserializing an ASTUnit), adding keywords to the identifier table is
      // deferred to Preprocessor::Initialize().
      Identifiers(IILookup), PragmaHandlers(new PragmaNamespace(StringRef())),
      TUKind(TUKind), SkipMainFilePreamble(0, true),
      CurSubmoduleState(&NullSubmoduleState) {
  OwnsHeaderSearch = OwnsHeaders;

  // Default to discarding comments.
  KeepComments = false;
  KeepMacroComments = false;
  SuppressIncludeNotFoundError = false;

  // Macro expansion is enabled.
  DisableMacroExpansion = false;
  MacroExpansionInDirectivesOverride = false;
  InMacroArgs = false;
  InMacroArgPreExpansion = false;
  NumCachedTokenLexers = 0;
  PragmasEnabled = true;
  ParsingIfOrElifDirective = false;
  PreprocessedOutput = false;

  // We haven't read anything from the external source.
  ReadMacrosFromExternalSource = false;

  // "Poison" __VA_ARGS__, __VA_OPT__ which can only appear in the expansion of
  // a macro. They get unpoisoned where it is allowed.
  (Ident__VA_ARGS__ = getIdentifierInfo("__VA_ARGS__"))->setIsPoisoned();
  SetPoisonReason(Ident__VA_ARGS__, diag::ext_pp_bad_vaargs_use);
  if (getLangOpts().CPlusPlus2a) {
    (Ident__VA_OPT__ = getIdentifierInfo("__VA_OPT__"))->setIsPoisoned();
    SetPoisonReason(Ident__VA_OPT__, diag::ext_pp_bad_vaopt_use);
  } else {
    Ident__VA_OPT__ = nullptr;
  }

  // Initialize the pragma handlers.
  RegisterBuiltinPragmas();

  // Initialize builtin macros like __LINE__ and friends.
  RegisterBuiltinMacros();

  if (LangOpts.Borland) {
    Ident__exception_info = getIdentifierInfo("_exception_info");
    Ident___exception_info = getIdentifierInfo("__exception_info");
    Ident_GetExceptionInfo = getIdentifierInfo("GetExceptionInformation");
    Ident__exception_code = getIdentifierInfo("_exception_code");
    Ident___exception_code = getIdentifierInfo("__exception_code");
    Ident_GetExceptionCode = getIdentifierInfo("GetExceptionCode");
    Ident__abnormal_termination = getIdentifierInfo("_abnormal_termination");
    Ident___abnormal_termination = getIdentifierInfo("__abnormal_termination");
    Ident_AbnormalTermination = getIdentifierInfo("AbnormalTermination");
  } else {
    Ident__exception_info = Ident__exception_code = nullptr;
    Ident__abnormal_termination = Ident___exception_info = nullptr;
    Ident___exception_code = Ident___abnormal_termination = nullptr;
    Ident_GetExceptionInfo = Ident_GetExceptionCode = nullptr;
    Ident_AbnormalTermination = nullptr;
  }

  // If using a PCH where a #pragma hdrstop is expected, start skipping tokens.
  if (usingPCHWithPragmaHdrStop())
    SkippingUntilPragmaHdrStop = true;

  // If using a PCH with a through header, start skipping tokens.
  if (!this->PPOpts->PCHThroughHeader.empty() &&
      !this->PPOpts->ImplicitPCHInclude.empty())
    SkippingUntilPCHThroughHeader = true;

  if (this->PPOpts->GeneratePreamble)
    PreambleConditionalStack.startRecording();
}

Preprocessor::~Preprocessor() {
  assert(BacktrackPositions.empty() && "EnableBacktrack/Backtrack imbalance!");

  IncludeMacroStack.clear();

  // Destroy any macro definitions.
  while (MacroInfoChain *I = MIChainHead) {
    MIChainHead = I->Next;
    I->~MacroInfoChain();
  }

  // Free any cached macro expanders.
  // This populates MacroArgCache, so all TokenLexers need to be destroyed
  // before the code below that frees up the MacroArgCache list.
  std::fill(TokenLexerCache, TokenLexerCache + NumCachedTokenLexers, nullptr);
  CurTokenLexer.reset();

  // Free any cached MacroArgs.
  for (MacroArgs *ArgList = MacroArgCache; ArgList;)
    ArgList = ArgList->deallocate();

  // Delete the header search info, if we own it.
  if (OwnsHeaderSearch)
    delete &HeaderInfo;
}

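// Illustrative sketch (not from this file): a minimal embedding constructs
// the Preprocessor, initializes it against the final target, and enters the
// main file. Setup of the SourceManager, HeaderSearch, etc. is elided here;
// CompilerInstance::createPreprocessor() is the canonical in-tree wiring.
//
//   Preprocessor PP(PPOpts, Diags, LangOpts, SourceMgr, PCMCache, HeaderInfo,
//                   ModLoader, /*IILookup=*/nullptr, /*OwnsHeaders=*/false);
//   PP.Initialize(*Target, /*AuxTarget=*/nullptr);
//   PP.EnterMainSourceFile();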
void Preprocessor::Initialize(const TargetInfo &Target,
                              const TargetInfo *AuxTarget) {
  assert((!this->Target || this->Target == &Target) &&
         "Invalid override of target information");
  this->Target = &Target;

  assert((!this->AuxTarget || this->AuxTarget == AuxTarget) &&
         "Invalid override of aux target information.");
  this->AuxTarget = AuxTarget;

  // Initialize information about built-ins.
  BuiltinInfo.InitializeTarget(Target, AuxTarget);
  HeaderInfo.setTarget(Target);

  // Populate the identifier table with info about keywords for the current
  // language.
  Identifiers.AddKeywords(LangOpts);
}

void Preprocessor::InitializeForModelFile() {
  NumEnteredSourceFiles = 0;

  // Reset pragmas
  PragmaHandlersBackup = std::move(PragmaHandlers);
  PragmaHandlers = llvm::make_unique<PragmaNamespace>(StringRef());
  RegisterBuiltinPragmas();

  // Reset PredefinesFileID
  PredefinesFileID = FileID();
}

void Preprocessor::FinalizeForModelFile() {
  NumEnteredSourceFiles = 1;

  PragmaHandlers = std::move(PragmaHandlersBackup);
}

void Preprocessor::DumpToken(const Token &Tok, bool DumpFlags) const {
  llvm::errs() << tok::getTokenName(Tok.getKind()) << " '"
               << getSpelling(Tok) << "'";

  if (!DumpFlags) return;

  llvm::errs() << "\t";
  if (Tok.isAtStartOfLine())
    llvm::errs() << " [StartOfLine]";
  if (Tok.hasLeadingSpace())
    llvm::errs() << " [LeadingSpace]";
  if (Tok.isExpandDisabled())
    llvm::errs() << " [ExpandDisabled]";
  if (Tok.needsCleaning()) {
    const char *Start = SourceMgr.getCharacterData(Tok.getLocation());
    llvm::errs() << " [UnClean='" << StringRef(Start, Tok.getLength())
                 << "']";
  }

  llvm::errs() << "\tLoc=<";
  DumpLocation(Tok.getLocation());
  llvm::errs() << ">";
}

void Preprocessor::DumpLocation(SourceLocation Loc) const {
  Loc.print(llvm::errs(), SourceMgr);
}

void Preprocessor::DumpMacro(const MacroInfo &MI) const {
  llvm::errs() << "MACRO: ";
  for (unsigned i = 0, e = MI.getNumTokens(); i != e; ++i) {
    DumpToken(MI.getReplacementToken(i));
    llvm::errs() << "  ";
  }
  llvm::errs() << "\n";
}

void Preprocessor::PrintStats() {
  llvm::errs() << "\n*** Preprocessor Stats:\n";
  llvm::errs() << NumDirectives << " directives found:\n";
  llvm::errs() << "  " << NumDefined << " #define.\n";
  llvm::errs() << "  " << NumUndefined << " #undef.\n";
  llvm::errs() << "  #include/#include_next/#import:\n";
  llvm::errs() << "    " << NumEnteredSourceFiles << " source files entered.\n";
  llvm::errs() << "    " << MaxIncludeStackDepth << " max include stack depth\n";
  llvm::errs() << "  " << NumIf << " #if/#ifndef/#ifdef.\n";
  llvm::errs() << "  " << NumElse << " #else/#elif.\n";
  llvm::errs() << "  " << NumEndif << " #endif.\n";
  llvm::errs() << "  " << NumPragma << " #pragma.\n";
  llvm::errs() << NumSkipped << " #if/#ifndef/#ifdef regions skipped\n";

  llvm::errs() << NumMacroExpanded << "/" << NumFnMacroExpanded << "/"
               << NumBuiltinMacroExpanded << " obj/fn/builtin macros expanded, "
               << NumFastMacroExpanded << " on the fast path.\n";
  llvm::errs() << (NumFastTokenPaste+NumTokenPaste)
               << " token paste (##) operations performed, "
               << NumFastTokenPaste << " on the fast path.\n";

  llvm::errs() << "\nPreprocessor Memory: " << getTotalMemory() << "B total";

  llvm::errs() << "\n  BumpPtr: " << BP.getTotalMemory();
  llvm::errs() << "\n  Macro Expanded Tokens: "
               << llvm::capacity_in_bytes(MacroExpandedTokens);
  llvm::errs() << "\n  Predefines Buffer: " << Predefines.capacity();
  // FIXME: List information for all submodules.
  llvm::errs() << "\n  Macros: "
               << llvm::capacity_in_bytes(CurSubmoduleState->Macros);
  llvm::errs() << "\n  #pragma push_macro Info: "
               << llvm::capacity_in_bytes(PragmaPushMacroInfo);
  llvm::errs() << "\n  Poison Reasons: "
               << llvm::capacity_in_bytes(PoisonReasons);
  llvm::errs() << "\n  Comment Handlers: "
               << llvm::capacity_in_bytes(CommentHandlers) << "\n";
}

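// Note: the first call to macro_begin()/macro_end() with IncludeExternalMacros
// set may eagerly deserialize every macro known to the external source (e.g. a
// PCH or module file), so that first iteration can be expensive.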
Preprocessor::macro_iterator
Preprocessor::macro_begin(bool IncludeExternalMacros) const {
  if (IncludeExternalMacros && ExternalSource &&
      !ReadMacrosFromExternalSource) {
    ReadMacrosFromExternalSource = true;
    ExternalSource->ReadDefinedMacros();
  }

  // Make sure we cover all macros in visible modules.
  for (const ModuleMacro &Macro : ModuleMacros)
    CurSubmoduleState->Macros.insert(std::make_pair(Macro.II, MacroState()));

  return CurSubmoduleState->Macros.begin();
}

size_t Preprocessor::getTotalMemory() const {
  return BP.getTotalMemory()
    + llvm::capacity_in_bytes(MacroExpandedTokens)
    + Predefines.capacity() /* Predefines buffer. */
    // FIXME: Include sizes from all submodules, and include MacroInfo sizes,
    // and ModuleMacros.
    + llvm::capacity_in_bytes(CurSubmoduleState->Macros)
    + llvm::capacity_in_bytes(PragmaPushMacroInfo)
    + llvm::capacity_in_bytes(PoisonReasons)
    + llvm::capacity_in_bytes(CommentHandlers);
}

Preprocessor::macro_iterator
Preprocessor::macro_end(bool IncludeExternalMacros) const {
  if (IncludeExternalMacros && ExternalSource &&
      !ReadMacrosFromExternalSource) {
    ReadMacrosFromExternalSource = true;
    ExternalSource->ReadDefinedMacros();
  }

  return CurSubmoduleState->Macros.end();
}

/// Compares macro tokens with a specified token value sequence.
static bool MacroDefinitionEquals(const MacroInfo *MI,
                                  ArrayRef<TokenValue> Tokens) {
  return Tokens.size() == MI->getNumTokens() &&
         std::equal(Tokens.begin(), Tokens.end(), MI->tokens_begin());
}

StringRef Preprocessor::getLastMacroWithSpelling(
    SourceLocation Loc,
    ArrayRef<TokenValue> Tokens) const {
  SourceLocation BestLocation;
  StringRef BestSpelling;
  for (Preprocessor::macro_iterator I = macro_begin(), E = macro_end();
       I != E; ++I) {
    const MacroDirective::DefInfo
      Def = I->second.findDirectiveAtLoc(Loc, SourceMgr);
    if (!Def || !Def.getMacroInfo())
      continue;
    if (!Def.getMacroInfo()->isObjectLike())
      continue;
    if (!MacroDefinitionEquals(Def.getMacroInfo(), Tokens))
      continue;
    SourceLocation Location = Def.getLocation();
    // Choose the macro defined latest.
    if (BestLocation.isInvalid() ||
        (Location.isValid() &&
         SourceMgr.isBeforeInTranslationUnit(BestLocation, Location))) {
      BestLocation = Location;
      BestSpelling = I->first->getName();
    }
  }
  return BestSpelling;
}

void Preprocessor::recomputeCurLexerKind() {
  if (CurLexer)
    CurLexerKind = CLK_Lexer;
  else if (CurTokenLexer)
    CurLexerKind = CLK_TokenLexer;
  else
    CurLexerKind = CLK_CachingLexer;
}

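// Code completion is implemented by overriding the file's contents with a
// copy that has a NUL byte spliced in at the completion point; the lexer
// recognizes that offset (CodeCompletionOffset) and produces a
// code-completion token there instead of lexing past it.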
bool Preprocessor::SetCodeCompletionPoint(const FileEntry *File,
                                          unsigned CompleteLine,
                                          unsigned CompleteColumn) {
  assert(File);
  assert(CompleteLine && CompleteColumn && "Starts from 1:1");
  assert(!CodeCompletionFile && "Already set");

  using llvm::MemoryBuffer;

  // Load the actual file's contents.
  bool Invalid = false;
  const MemoryBuffer *Buffer = SourceMgr.getMemoryBufferForFile(File, &Invalid);
  if (Invalid)
    return true;

  // Find the byte position of the truncation point.
  const char *Position = Buffer->getBufferStart();
  for (unsigned Line = 1; Line < CompleteLine; ++Line) {
    for (; *Position; ++Position) {
      if (*Position != '\r' && *Position != '\n')
        continue;

      // Eat \r\n or \n\r as a single line.
      if ((Position[1] == '\r' || Position[1] == '\n') &&
          Position[0] != Position[1])
        ++Position;
      ++Position;
      break;
    }
  }

  Position += CompleteColumn - 1;

  // If pointing inside the preamble, adjust the position at the beginning of
  // the file after the preamble.
  if (SkipMainFilePreamble.first &&
      SourceMgr.getFileEntryForID(SourceMgr.getMainFileID()) == File) {
    if (Position - Buffer->getBufferStart() < SkipMainFilePreamble.first)
      Position = Buffer->getBufferStart() + SkipMainFilePreamble.first;
  }

  if (Position > Buffer->getBufferEnd())
    Position = Buffer->getBufferEnd();

  CodeCompletionFile = File;
  CodeCompletionOffset = Position - Buffer->getBufferStart();

  auto NewBuffer = llvm::WritableMemoryBuffer::getNewUninitMemBuffer(
      Buffer->getBufferSize() + 1, Buffer->getBufferIdentifier());
  char *NewBuf = NewBuffer->getBufferStart();
  char *NewPos = std::copy(Buffer->getBufferStart(), Position, NewBuf);
  *NewPos = '\0';
  std::copy(Position, Buffer->getBufferEnd(), NewPos+1);
  SourceMgr.overrideFileContents(File, std::move(NewBuffer));

  return false;
}

void Preprocessor::CodeCompleteIncludedFile(llvm::StringRef Dir,
                                            bool IsAngled) {
  if (CodeComplete)
    CodeComplete->CodeCompleteIncludedFile(Dir, IsAngled);
  setCodeCompletionReached();
}

void Preprocessor::CodeCompleteNaturalLanguage() {
  if (CodeComplete)
    CodeComplete->CodeCompleteNaturalLanguage();
  setCodeCompletionReached();
}

/// getSpelling - This method is used to get the spelling of a token into a
/// SmallVector. Note that the returned StringRef may not point to the
/// supplied buffer if a copy can be avoided.
StringRef Preprocessor::getSpelling(const Token &Tok,
                                    SmallVectorImpl<char> &Buffer,
                                    bool *Invalid) const {
  // NOTE: this has to be checked *before* testing for an IdentifierInfo.
  if (Tok.isNot(tok::raw_identifier) && !Tok.hasUCN()) {
    // Try the fast path.
    if (const IdentifierInfo *II = Tok.getIdentifierInfo())
      return II->getName();
  }

  // Resize the buffer if we need to copy into it.
  if (Tok.needsCleaning())
    Buffer.resize(Tok.getLength());

  const char *Ptr = Buffer.data();
  unsigned Len = getSpelling(Tok, Ptr, Invalid);
  return StringRef(Ptr, Len);
}

/// CreateString - Plop the specified string into a scratch buffer and return a
/// location for it. If specified, the source location provides a source
/// location for the token.
void Preprocessor::CreateString(StringRef Str, Token &Tok,
                                SourceLocation ExpansionLocStart,
                                SourceLocation ExpansionLocEnd) {
  Tok.setLength(Str.size());

  const char *DestPtr;
  SourceLocation Loc = ScratchBuf->getToken(Str.data(), Str.size(), DestPtr);

  if (ExpansionLocStart.isValid())
    Loc = SourceMgr.createExpansionLoc(Loc, ExpansionLocStart,
                                       ExpansionLocEnd, Str.size());
  Tok.setLocation(Loc);

  // If this is a raw identifier or a literal token, set the pointer data.
  if (Tok.is(tok::raw_identifier))
    Tok.setRawIdentifierData(DestPtr);
  else if (Tok.isLiteral())
    Tok.setLiteralData(DestPtr);
}

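// SplitToken manufactures a spelling location for a sub-range of an existing
// token. The parser relies on locations like this when it splits one token
// into two, e.g. treating the '>>' in 'A<B<int>>' as two '>' tokens.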
SourceLocation Preprocessor::SplitToken(SourceLocation Loc, unsigned Length) {
  auto &SM = getSourceManager();
  SourceLocation SpellingLoc = SM.getSpellingLoc(Loc);
  std::pair<FileID, unsigned> LocInfo = SM.getDecomposedLoc(SpellingLoc);
  bool Invalid = false;
  StringRef Buffer = SM.getBufferData(LocInfo.first, &Invalid);
  if (Invalid)
    return SourceLocation();

  // FIXME: We could consider re-using spelling for tokens we see repeatedly.
  const char *DestPtr;
  SourceLocation Spelling =
      ScratchBuf->getToken(Buffer.data() + LocInfo.second, Length, DestPtr);
  return SM.createTokenSplitLoc(Spelling, Loc, Loc.getLocWithOffset(Length));
}

Module *Preprocessor::getCurrentModule() {
  if (!getLangOpts().isCompilingModule())
    return nullptr;

  return getHeaderSearchInfo().lookupModule(getLangOpts().CurrentModule);
}

//===----------------------------------------------------------------------===//
// Preprocessor Initialization Methods
//===----------------------------------------------------------------------===//

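// A typical client drives preprocessing with a loop like the following
// (illustrative sketch only):
//
//   PP.EnterMainSourceFile();
//   Token Tok;
//   do {
//     PP.Lex(Tok);
//   } while (Tok.isNot(tok::eof));
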
/// EnterMainSourceFile - Enter the specified FileID as the main source file,
/// which implicitly adds the builtin defines etc.
void Preprocessor::EnterMainSourceFile() {
  // We do not allow the preprocessor to reenter the main file. Doing so will
  // cause FileID's to accumulate information from both runs (e.g. #line
  // information) and predefined macros aren't guaranteed to be set properly.
  assert(NumEnteredSourceFiles == 0 && "Cannot reenter the main file!");
  FileID MainFileID = SourceMgr.getMainFileID();

  // If MainFileID is loaded it means we loaded an AST file, no need to enter
  // a main file.
  if (!SourceMgr.isLoadedFileID(MainFileID)) {
    // Enter the main file source buffer.
    EnterSourceFile(MainFileID, nullptr, SourceLocation());

    // If we've been asked to skip bytes in the main file (e.g., as part of a
    // precompiled preamble), do so now.
    if (SkipMainFilePreamble.first > 0)
      CurLexer->SetByteOffset(SkipMainFilePreamble.first,
                              SkipMainFilePreamble.second);

    // Tell the header info that the main file was entered. If the file is
    // later #imported, it won't be re-entered.
    if (const FileEntry *FE = SourceMgr.getFileEntryForID(MainFileID))
      HeaderInfo.IncrementIncludeCount(FE);
  }

  // Preprocess Predefines to populate the initial preprocessor state.
  std::unique_ptr<llvm::MemoryBuffer> SB =
    llvm::MemoryBuffer::getMemBufferCopy(Predefines, "<built-in>");
  assert(SB && "Cannot create predefined source buffer");
  FileID FID = SourceMgr.createFileID(std::move(SB));
  assert(FID.isValid() && "Could not create FileID for predefines?");
  setPredefinesFileID(FID);

  // Start parsing the predefines.
  EnterSourceFile(FID, nullptr, SourceLocation());

  if (!PPOpts->PCHThroughHeader.empty()) {
    // Lookup and save the FileID for the through header. If it isn't found
    // in the search path, it's a fatal error.
    const DirectoryLookup *CurDir;
    const FileEntry *File = LookupFile(
        SourceLocation(), PPOpts->PCHThroughHeader,
        /*isAngled=*/false, /*FromDir=*/nullptr, /*FromFile=*/nullptr, CurDir,
        /*SearchPath=*/nullptr, /*RelativePath=*/nullptr,
        /*SuggestedModule=*/nullptr, /*IsMapped=*/nullptr);
    if (!File) {
      Diag(SourceLocation(), diag::err_pp_through_header_not_found)
          << PPOpts->PCHThroughHeader;
      return;
    }
    setPCHThroughHeaderFileID(
        SourceMgr.createFileID(File, SourceLocation(), SrcMgr::C_User));
  }

  // Skip tokens from the Predefines and if needed the main file.
  if ((usingPCHWithThroughHeader() && SkippingUntilPCHThroughHeader) ||
      (usingPCHWithPragmaHdrStop() && SkippingUntilPragmaHdrStop))
    SkipTokensWhileUsingPCH();
}

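// The "through header" (the PCHThroughHeader preprocessor option, surfaced as
// -pch-through-header= at the cc1 level) names a header such that, when
// creating a PCH, compilation stops after it, and when using the PCH, tokens
// are skipped until its #include is processed (see SkipTokensWhileUsingPCH
// below).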
void Preprocessor::setPCHThroughHeaderFileID(FileID FID) {
  assert(PCHThroughHeaderFileID.isInvalid() &&
         "PCHThroughHeaderFileID already set!");
  PCHThroughHeaderFileID = FID;
}

bool Preprocessor::isPCHThroughHeader(const FileEntry *FE) {
  assert(PCHThroughHeaderFileID.isValid() &&
         "Invalid PCH through header FileID");
  return FE == SourceMgr.getFileEntryForID(PCHThroughHeaderFileID);
}

bool Preprocessor::creatingPCHWithThroughHeader() {
  return TUKind == TU_Prefix && !PPOpts->PCHThroughHeader.empty() &&
         PCHThroughHeaderFileID.isValid();
}

bool Preprocessor::usingPCHWithThroughHeader() {
  return TUKind != TU_Prefix && !PPOpts->PCHThroughHeader.empty() &&
         PCHThroughHeaderFileID.isValid();
}

bool Preprocessor::creatingPCHWithPragmaHdrStop() {
  return TUKind == TU_Prefix && PPOpts->PCHWithHdrStop;
}

bool Preprocessor::usingPCHWithPragmaHdrStop() {
  return TUKind != TU_Prefix && PPOpts->PCHWithHdrStop;
}

/// Skip tokens until after the #include of the through header or
/// until after a #pragma hdrstop is seen. Tokens in the predefines file
/// and the main file may be skipped. If the end of the predefines file
/// is reached, skipping continues into the main file. If the end of the
/// main file is reached, it's a fatal error.
void Preprocessor::SkipTokensWhileUsingPCH() {
  bool ReachedMainFileEOF = false;
  bool UsingPCHThroughHeader = SkippingUntilPCHThroughHeader;
  bool UsingPragmaHdrStop = SkippingUntilPragmaHdrStop;
  Token Tok;
  while (true) {
    bool InPredefines = (CurLexer->getFileID() == getPredefinesFileID());
    CurLexer->Lex(Tok);
    if (Tok.is(tok::eof) && !InPredefines) {
      ReachedMainFileEOF = true;
      break;
    }
    if (UsingPCHThroughHeader && !SkippingUntilPCHThroughHeader)
      break;
    if (UsingPragmaHdrStop && !SkippingUntilPragmaHdrStop)
      break;
  }
  if (ReachedMainFileEOF) {
    if (UsingPCHThroughHeader)
      Diag(SourceLocation(), diag::err_pp_through_header_not_seen)
          << PPOpts->PCHThroughHeader << 1;
    else if (!PPOpts->PCHWithHdrStopCreate)
      Diag(SourceLocation(), diag::err_pp_pragma_hdrstop_not_seen);
  }
}

void Preprocessor::replayPreambleConditionalStack() {
  // Restore the conditional stack from the preamble, if there is one.
  if (PreambleConditionalStack.isReplaying()) {
    assert(CurPPLexer &&
           "CurPPLexer is null when calling replayPreambleConditionalStack.");
    CurPPLexer->setConditionalLevels(PreambleConditionalStack.getStack());
    PreambleConditionalStack.doneReplaying();
    if (PreambleConditionalStack.reachedEOFWhileSkipping())
      SkipExcludedConditionalBlock(
          PreambleConditionalStack.SkipInfo->HashTokenLoc,
          PreambleConditionalStack.SkipInfo->IfTokenLoc,
          PreambleConditionalStack.SkipInfo->FoundNonSkipPortion,
          PreambleConditionalStack.SkipInfo->FoundElse,
          PreambleConditionalStack.SkipInfo->ElseLoc);
  }
}

void Preprocessor::EndSourceFile() {
  // Notify the client that we reached the end of the source file.
  if (Callbacks)
    Callbacks->EndOfMainFile();
}

//===----------------------------------------------------------------------===//
// Lexer Event Handling.
//===----------------------------------------------------------------------===//

/// LookUpIdentifierInfo - Given a tok::raw_identifier token, look up the
/// identifier information for the token and install it into the token,
/// updating the token kind accordingly.
IdentifierInfo *Preprocessor::LookUpIdentifierInfo(Token &Identifier) const {
  assert(!Identifier.getRawIdentifier().empty() && "No raw identifier data!");

  // Look up this token, see if it is a macro, or if it is a language keyword.
  IdentifierInfo *II;
  if (!Identifier.needsCleaning() && !Identifier.hasUCN()) {
    // No cleaning needed, just use the characters from the lexed buffer.
    II = getIdentifierInfo(Identifier.getRawIdentifier());
  } else {
    // Cleaning needed, alloca a buffer, clean into it, then use the buffer.
    SmallString<64> IdentifierBuffer;
    StringRef CleanedStr = getSpelling(Identifier, IdentifierBuffer);

    if (Identifier.hasUCN()) {
      SmallString<64> UCNIdentifierBuffer;
      expandUCNs(UCNIdentifierBuffer, CleanedStr);
      II = getIdentifierInfo(UCNIdentifierBuffer);
    } else {
      II = getIdentifierInfo(CleanedStr);
    }
  }

  // Update the token info (identifier info and appropriate token kind).
  Identifier.setIdentifierInfo(II);
  if (getLangOpts().MSVCCompat && II->isCPlusPlusOperatorKeyword() &&
      getSourceManager().isInSystemHeader(Identifier.getLocation()))
    Identifier.setKind(tok::identifier);
  else
    Identifier.setKind(II->getTokenID());

  return II;
}

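// Poisoned identifiers are diagnosed whenever they are subsequently lexed.
// SetPoisonReason records a custom diagnostic to emit instead of the default
// err_pp_used_poisoned_id (see HandlePoisonedIdentifier below).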
void Preprocessor::SetPoisonReason(IdentifierInfo *II, unsigned DiagID) {
  PoisonReasons[II] = DiagID;
}

void Preprocessor::PoisonSEHIdentifiers(bool Poison) {
  assert(Ident__exception_code && Ident__exception_info);
  assert(Ident___exception_code && Ident___exception_info);
  Ident__exception_code->setIsPoisoned(Poison);
  Ident___exception_code->setIsPoisoned(Poison);
  Ident_GetExceptionCode->setIsPoisoned(Poison);
  Ident__exception_info->setIsPoisoned(Poison);
  Ident___exception_info->setIsPoisoned(Poison);
  Ident_GetExceptionInfo->setIsPoisoned(Poison);
  Ident__abnormal_termination->setIsPoisoned(Poison);
  Ident___abnormal_termination->setIsPoisoned(Poison);
  Ident_AbnormalTermination->setIsPoisoned(Poison);
}

void Preprocessor::HandlePoisonedIdentifier(Token &Identifier) {
  assert(Identifier.getIdentifierInfo() &&
         "Can't handle identifiers without identifier info!");
  llvm::DenseMap<IdentifierInfo*, unsigned>::const_iterator it =
      PoisonReasons.find(Identifier.getIdentifierInfo());
  if (it == PoisonReasons.end())
    Diag(Identifier, diag::err_pp_used_poisoned_id);
  else
    Diag(Identifier, it->second) << Identifier.getIdentifierInfo();
}

/// Returns a diagnostic message kind for reporting a future keyword as
/// appropriate for the identifier and specified language.
static diag::kind getFutureCompatDiagKind(const IdentifierInfo &II,
                                          const LangOptions &LangOpts) {
  assert(II.isFutureCompatKeyword() && "diagnostic should not be needed");

  if (LangOpts.CPlusPlus)
    return llvm::StringSwitch<diag::kind>(II.getName())
#define CXX11_KEYWORD(NAME, FLAGS)                                             \
        .Case(#NAME, diag::warn_cxx11_keyword)
#define CXX2A_KEYWORD(NAME, FLAGS)                                             \
        .Case(#NAME, diag::warn_cxx2a_keyword)
#include "clang/Basic/TokenKinds.def"
        ;

  llvm_unreachable(
      "Keyword not known to come from a newer Standard or proposed Standard");
}

void Preprocessor::updateOutOfDateIdentifier(IdentifierInfo &II) const {
  assert(II.isOutOfDate() && "not out of date");
  getExternalSource()->updateOutOfDateIdentifier(II);
}

/// HandleIdentifier - This callback is invoked when the lexer reads an
|
|
|
|
/// identifier. This callback looks up the identifier in the map and/or
|
|
|
|
/// potentially macro expands it or turns it into a named token (like 'for').
|
2009-01-21 15:43:11 +08:00
|
|
|
///
|
|
|
|
/// Note that callers of this method are guarded by checking the
|
|
|
|
/// IdentifierInfo's 'isHandleIdentifierCase' bit. If this method changes, the
|
|
|
|
/// IdentifierInfo methods that compute these properties will need to change to
|
|
|
|
/// match.
|
2013-09-19 08:41:32 +08:00
|
|
|
bool Preprocessor::HandleIdentifier(Token &Identifier) {
|
2006-07-20 12:16:23 +08:00
|
|
|
assert(Identifier.getIdentifierInfo() &&
|
|
|
|
"Can't handle identifiers without identifier info!");
|
2009-09-09 23:08:12 +08:00
|
|
|
|
2006-07-05 01:53:21 +08:00
|
|
|
IdentifierInfo &II = *Identifier.getIdentifierInfo();
  // If the information about this identifier is out of date, update it from
  // the external source.
  // We have to treat __VA_ARGS__ in a special way, since it gets
  // serialized with isPoisoned = true, but our preprocessor may have
  // unpoisoned it if we're defining a C99 macro.
  if (II.isOutOfDate()) {
    bool CurrentIsPoisoned = false;
    const bool IsSpecialVariadicMacro =
        &II == Ident__VA_ARGS__ || &II == Ident__VA_OPT__;
    if (IsSpecialVariadicMacro)
      CurrentIsPoisoned = II.isPoisoned();

    updateOutOfDateIdentifier(II);
    Identifier.setKind(II.getTokenID());

    if (IsSpecialVariadicMacro)
      II.setIsPoisoned(CurrentIsPoisoned);
  }

  // If this identifier was poisoned, and if it was not produced from a macro
  // expansion, emit an error.
  if (II.isPoisoned() && CurPPLexer) {
    HandlePoisonedIdentifier(Identifier);
  }

  // If this is a macro to be expanded, do it.
  if (MacroDefinition MD = getMacroDefinition(&II)) {
    auto *MI = MD.getMacroInfo();
    assert(MI && "macro definition with no macro info?");
    if (!DisableMacroExpansion) {
      if (!Identifier.isExpandDisabled() && MI->isEnabled()) {
        // C99 6.10.3p10: If the preprocessing token immediately after the
        // macro name isn't a '(', this macro should not be expanded.
        if (!MI->isFunctionLike() || isNextPPTokenLParen())
          return HandleMacroExpandedIdentifier(Identifier, MD);
      } else {
        // C99 6.10.3.4p2 says that a disabled macro may never again be
        // expanded, even if it's in a context where it could be expanded in the
        // future.
        Identifier.setFlag(Token::DisableExpand);
        if (MI->isObjectLike() || isNextPPTokenLParen())
          Diag(Identifier, diag::pp_disabled_macro_expansion);
      }
    }
  }
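
  // Illustration of the two C99 rules handled above (preprocessor input
  // sketch, not implementation code):
  //
  //   #define F(x) x + 1
  //   int F;       // 'F' is not followed by '(', so it is not expanded.
  //
  //   #define recur(x) recur(x)
  //   recur(1);    // Expands once; the nested 'recur' token is marked
  //                // Token::DisableExpand and is never expanded again.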

  // If this identifier is a keyword in a newer Standard or proposed Standard,
  // produce a warning. Don't warn if we're not considering macro expansion,
  // since this identifier might be the name of a macro.
  // FIXME: This warning is disabled in cases where it shouldn't be, like
  //   "#define constexpr constexpr", "int constexpr;"
  if (II.isFutureCompatKeyword() && !DisableMacroExpansion) {
    Diag(Identifier, getFutureCompatDiagKind(II, getLangOpts()))
        << II.getName();
    // Don't diagnose this keyword again in this translation unit.
    II.setIsFutureCompatKeyword(false);
  }

  // If this is an extension token, diagnose its use.
  // We avoid diagnosing tokens that originate from macro definitions.
  // FIXME: This warning is disabled in cases where it shouldn't be,
  // like "#define TY typeof", "TY(1) x".
  if (II.isExtensionToken() && !DisableMacroExpansion)
    Diag(Identifier, diag::ext_token_used);
2018-07-31 03:24:48 +08:00
|
|
|
|
2013-11-08 06:55:02 +08:00
|
|
|
// If this is the 'import' contextual keyword following an '@', note
|
2012-03-02 06:07:04 +08:00
|
|
|
// that the next token indicates a module name.
|
2012-01-04 14:20:15 +08:00
|
|
|
//
|
2012-12-12 06:11:52 +08:00
|
|
|
// Note that we do not treat 'import' as a contextual
|
2012-03-02 06:07:04 +08:00
|
|
|
// keyword when we're in a caching lexer, because caching lexers only get
|
|
|
|
// used in contexts where import declarations are disallowed.
|
2016-08-19 05:59:42 +08:00
|
|
|
//
|
|
|
|
// Likewise if this is the C++ Modules TS import keyword.
|
|
|
|
if (((LastTokenWasAt && II.isModulesImport()) ||
|
|
|
|
Identifier.is(tok::kw_import)) &&
|
|
|
|
!InMacroArgs && !DisableMacroExpansion &&
|
|
|
|
(getLangOpts().Modules || getLangOpts().DebuggerSupport) &&
|
2013-11-08 06:55:02 +08:00
|
|
|
CurLexerKind != CLK_CachingLexer) {
|
2011-09-08 07:11:54 +08:00
|
|
|
ModuleImportLoc = Identifier.getLocation();
|
2011-11-30 12:26:53 +08:00
|
|
|
ModuleImportPath.clear();
|
|
|
|
ModuleImportExpectsIdentifier = true;
|
2011-09-08 07:11:54 +08:00
|
|
|
CurLexerKind = CLK_LexAfterModuleImport;
|
|
|
|
}
|
2013-09-19 08:41:32 +08:00
|
|
|
return true;
|
2011-08-27 07:56:07 +08:00
|
|
|
}
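
// For example (preprocessor input sketch, not implementation code):
//
//   @import Foo.Bar;   // Objective-C: '@' followed by 'import' arms
//                      // CLK_LexAfterModuleImport via LastTokenWasAt.
//   import Foo.Bar;    // C++ Modules TS: 'import' lexes as tok::kw_import.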

void Preprocessor::Lex(Token &Result) {
  // We loop here until a lex function returns a token; this avoids recursion.
  bool ReturnedToken;
  do {
    switch (CurLexerKind) {
    case CLK_Lexer:
      ReturnedToken = CurLexer->Lex(Result);
      break;
    case CLK_TokenLexer:
      ReturnedToken = CurTokenLexer->Lex(Result);
      break;
    case CLK_CachingLexer:
      CachingLex(Result);
      ReturnedToken = true;
      break;
    case CLK_LexAfterModuleImport:
      LexAfterModuleImport(Result);
      ReturnedToken = true;
      break;
    }
  } while (!ReturnedToken);

  if (Result.is(tok::code_completion) && Result.getIdentifierInfo()) {
    // Remember the identifier before code completion token.
    setCodeCompletionIdentifierInfo(Result.getIdentifierInfo());
    setCodeCompletionTokenRange(Result.getLocation(), Result.getEndLoc());
    // Set IdentifierInfo to null to avoid confusing code that handles both
    // identifiers and completion tokens.
    Result.setIdentifierInfo(nullptr);
  }

  LastTokenWasAt = Result.is(tok::at);
}
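
// Typical driver loop over this entry point (illustrative sketch):
//
//   clang::Token Tok;
//   PP.EnterMainSourceFile();
//   do {
//     PP.Lex(Tok);
//     // ... consume Tok ...
//   } while (Tok.isNot(clang::tok::eof));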

/// Lex a token following the 'import' contextual keyword.
void Preprocessor::LexAfterModuleImport(Token &Result) {
  // Figure out what kind of lexer we actually have.
  recomputeCurLexerKind();

  // Lex the next token.
  Lex(Result);

  // The token sequence
  //
  //   import identifier (. identifier)*
  //
  // indicates a module import directive. We already saw the 'import'
  // contextual keyword, so now we're looking for the identifiers.
  if (ModuleImportExpectsIdentifier && Result.getKind() == tok::identifier) {
    // We expected to see an identifier here, and we did; continue handling
    // identifiers.
    ModuleImportPath.push_back(std::make_pair(Result.getIdentifierInfo(),
                                              Result.getLocation()));
    ModuleImportExpectsIdentifier = false;
    CurLexerKind = CLK_LexAfterModuleImport;
    return;
  }

  // If we're expecting a '.' or a ';', and we got a '.', then wait until we
  // see the next identifier. (We can also see a '[[' that begins an
  // attribute-specifier-seq here under the C++ Modules TS.)
  if (!ModuleImportExpectsIdentifier && Result.getKind() == tok::period) {
    ModuleImportExpectsIdentifier = true;
    CurLexerKind = CLK_LexAfterModuleImport;
    return;
  }

  // If we have a non-empty module path, load the named module.
  if (!ModuleImportPath.empty()) {
    // Under the Modules TS, the dot is just part of the module name, and not
    // a real hierarchy separator. Flatten such module names now.
    //
    // FIXME: Is this the right level to be performing this transformation?
    std::string FlatModuleName;
    if (getLangOpts().ModulesTS) {
      for (auto &Piece : ModuleImportPath) {
        if (!FlatModuleName.empty())
          FlatModuleName += ".";
        FlatModuleName += Piece.first->getName();
      }
      SourceLocation FirstPathLoc = ModuleImportPath[0].second;
      ModuleImportPath.clear();
      ModuleImportPath.push_back(
          std::make_pair(getIdentifierInfo(FlatModuleName), FirstPathLoc));
    }
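
    // For example (illustrative sketch): under -fmodules-ts, 'import foo.bar;'
    // produces a single-element path whose name is the flat string "foo.bar"
    // rather than the hierarchical two-element path ["foo", "bar"].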

    Module *Imported = nullptr;
    if (getLangOpts().Modules) {
      Imported = TheModuleLoader.loadModule(ModuleImportLoc,
                                            ModuleImportPath,
                                            Module::Hidden,
                                            /*IsIncludeDirective=*/false);
      if (Imported)
        makeModuleVisible(Imported, ModuleImportLoc);
    }
    if (Callbacks && (getLangOpts().Modules || getLangOpts().DebuggerSupport))
      Callbacks->moduleImport(ModuleImportLoc, ModuleImportPath, Imported);
  }
}
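
// End-to-end (sketch): for the input 'import A.B;' with modules enabled,
// HandleIdentifier() arms CLK_LexAfterModuleImport; this function then runs
// once per lexed token, accumulating ModuleImportPath across the
// '.'-separated identifiers, and on the first token past the path asks
// TheModuleLoader to load the module and fires PPCallbacks::moduleImport.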

void Preprocessor::makeModuleVisible(Module *M, SourceLocation Loc) {
  CurSubmoduleState->VisibleModules.setVisible(
      M, Loc, [](Module *) {},
      [&](ArrayRef<Module *> Path, Module *Conflict, StringRef Message) {
        // FIXME: Include the path in the diagnostic.
        // FIXME: Include the import location for the conflicting module.
        Diag(ModuleImportLoc, diag::warn_module_conflict)
            << Path[0]->getFullModuleName()
            << Conflict->getFullModuleName()
            << Message;
      });

  // Add this module to the imports list of the currently-built submodule.
  if (!BuildingSubmoduleStack.empty() && M != BuildingSubmoduleStack.back().M)
    BuildingSubmoduleStack.back().M->Imports.insert(M);
}
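
// The first callback above (a no-op here) runs for each module that becomes
// visible; the second reports conflicts declared in a module map, e.g.:
//
//   module A { conflict B, "A and B don't mix" }
//
// Importing both A and B then produces warn_module_conflict carrying the
// message above.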

bool Preprocessor::FinishLexStringLiteral(Token &Result, std::string &String,
                                          const char *DiagnosticTag,
                                          bool AllowMacroExpansion) {
  // We need at least one string literal.
  if (Result.isNot(tok::string_literal)) {
    Diag(Result, diag::err_expected_string_literal)
        << /*Source='in...'*/0 << DiagnosticTag;
    return false;
  }

  // Lex string literal tokens, optionally with macro expansion.
  SmallVector<Token, 4> StrToks;
  do {
    StrToks.push_back(Result);

    if (Result.hasUDSuffix())
      Diag(Result, diag::err_invalid_string_udl);

    if (AllowMacroExpansion)
      Lex(Result);
    else
      LexUnexpandedToken(Result);
  } while (Result.is(tok::string_literal));

  // Concatenate and parse the strings.
  StringLiteralParser Literal(StrToks, *this);
  assert(Literal.isAscii() && "Didn't allow wide strings in");

  if (Literal.hadError)
    return false;

  if (Literal.Pascal) {
    Diag(StrToks[0].getLocation(), diag::err_expected_string_literal)
        << /*Source='in...'*/0 << DiagnosticTag;
    return false;
  }

  String = Literal.GetString();
  return true;
}
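
// Example caller (sketch, modeled on pragma handling): collect the string
// argument of something like '#pragma message("...")'. The surrounding
// lexing is elided.
//
//   clang::Token Tok;
//   PP.Lex(Tok); // Should yield the first string-literal token.
//   std::string Message;
//   if (!PP.FinishLexStringLiteral(Tok, Message, "pragma message",
//                                  /*AllowMacroExpansion=*/true))
//     return; // A diagnostic was already emitted.
//   // 'Message' now holds the concatenated narrow-string contents.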

bool Preprocessor::parseSimpleIntegerLiteral(Token &Tok, uint64_t &Value) {
  assert(Tok.is(tok::numeric_constant));
  SmallString<8> IntegerBuffer;
  bool NumberInvalid = false;
  StringRef Spelling = getSpelling(Tok, IntegerBuffer, &NumberInvalid);
  if (NumberInvalid)
    return false;
  NumericLiteralParser Literal(Spelling, Tok.getLocation(), *this);
  if (Literal.hadError || !Literal.isIntegerLiteral() || Literal.hasUDSuffix())
    return false;
  llvm::APInt APVal(64, 0);
  if (Literal.GetIntegerValue(APVal))
    return false;
  Lex(Tok);
  Value = APVal.getLimitedValue();
  return true;
}
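
// Example caller (sketch): read a single integer pragma argument, such as
// the '4' in '#pragma unroll 4'.
//
//   clang::Token Tok;
//   PP.Lex(Tok);
//   uint64_t N = 0;
//   if (Tok.is(clang::tok::numeric_constant) &&
//       PP.parseSimpleIntegerLiteral(Tok, N)) {
//     // N holds the value; Tok has advanced past the literal.
//   }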

void Preprocessor::addCommentHandler(CommentHandler *Handler) {
  assert(Handler && "NULL comment handler");
  assert(std::find(CommentHandlers.begin(), CommentHandlers.end(), Handler) ==
         CommentHandlers.end() && "Comment handler already registered");
  CommentHandlers.push_back(Handler);
}

void Preprocessor::removeCommentHandler(CommentHandler *Handler) {
  std::vector<CommentHandler *>::iterator Pos =
      std::find(CommentHandlers.begin(), CommentHandlers.end(), Handler);
  assert(Pos != CommentHandlers.end() && "Comment handler not registered");
  CommentHandlers.erase(Pos);
}
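
// Example handler (sketch): a CommentHandler that records every comment's
// source range.
//
//   struct CommentSaver : clang::CommentHandler {
//     std::vector<clang::SourceRange> Comments;
//     bool HandleComment(clang::Preprocessor &PP,
//                        clang::SourceRange Range) override {
//       Comments.push_back(Range);
//       return false; // Do not inject any pending tokens.
//     }
//   };
//
//   CommentSaver Saver;
//   PP.addCommentHandler(&Saver);
//   // ... run the preprocessor ...
//   PP.removeCommentHandler(&Saver);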

bool Preprocessor::HandleComment(Token &result, SourceRange Comment) {
  bool AnyPendingTokens = false;
  for (std::vector<CommentHandler *>::iterator H = CommentHandlers.begin(),
       HEnd = CommentHandlers.end();
       H != HEnd; ++H) {
    if ((*H)->HandleComment(*this, Comment))
      AnyPendingTokens = true;
  }
  if (!AnyPendingTokens || getCommentRetentionState())
    return false;
  Lex(result);
  return true;
}

ModuleLoader::~ModuleLoader() = default;

CommentHandler::~CommentHandler() = default;

CodeCompletionHandler::~CodeCompletionHandler() = default;

void Preprocessor::createPreprocessingRecord() {
  if (Record)
    return;

  Record = new PreprocessingRecord(getSourceManager());
  addPPCallbacks(std::unique_ptr<PPCallbacks>(Record));
}
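
// Example (sketch): enable the record before preprocessing, then walk it.
//
//   PP.createPreprocessingRecord();
//   // ... preprocess the translation unit ...
//   if (clang::PreprocessingRecord *R = PP.getPreprocessingRecord())
//     for (clang::PreprocessedEntity *E : *R) {
//       // Inspect macro expansions, definitions, inclusion directives, ...
//     }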