//===--- PPCaching.cpp - Handle caching lexed tokens ----------------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file implements pieces of the Preprocessor interface that manage the
// caching of lexed tokens.
//
//===----------------------------------------------------------------------===//

#include "clang/Lex/Preprocessor.h"
using namespace clang;

// EnableBacktrackAtThisPos - From the point that this method is called, and
// until CommitBacktrackedTokens() or Backtrack() is called, the Preprocessor
// keeps track of the lexed tokens so that a subsequent Backtrack() call will
// make the Preprocessor re-lex the same tokens.
//
// Nested backtracks are allowed, meaning that EnableBacktrackAtThisPos can
// be called multiple times and CommitBacktrackedTokens/Backtrack calls will
// be combined with the EnableBacktrackAtThisPos calls in reverse order.
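//
// As an illustrative sketch only (a hypothetical caller; `PP` and `Tok` are
// assumed to be a Preprocessor reference and a Token owned by that caller),
// nested use looks roughly like this:
//
//   PP.EnableBacktrackAtThisPos();    // outer backtrack position
//   PP.Lex(Tok);                      // lexed tokens are cached from here on
//   PP.EnableBacktrackAtThisPos();    // nested backtrack position
//   PP.Lex(Tok);
//   PP.Backtrack();                   // re-lex only from the inner position
//   PP.CommitBacktrackedTokens();     // accept tokens since the outer position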
void Preprocessor::EnableBacktrackAtThisPos() {
  assert(LexLevel == 0 && "cannot use lookahead while lexing");
  BacktrackPositions.push_back(CachedLexPos);
  EnterCachingLexMode();
}

// Disable the last EnableBacktrackAtThisPos call.
void Preprocessor::CommitBacktrackedTokens() {
  assert(!BacktrackPositions.empty()
         && "EnableBacktrackAtThisPos was not called!");
  BacktrackPositions.pop_back();
}

// Make Preprocessor re-lex the tokens that were lexed since
// EnableBacktrackAtThisPos() was previously called.
void Preprocessor::Backtrack() {
  assert(!BacktrackPositions.empty()
         && "EnableBacktrackAtThisPos was not called!");
  CachedLexPos = BacktrackPositions.back();
  BacktrackPositions.pop_back();
  recomputeCurLexerKind();
}
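
// CachingLex - Lex a token from the caching layer: replay a previously cached
// token if one is available; otherwise lex a new token and, while backtracking
// is enabled, append it to CachedTokens so a later Backtrack() can replay it.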
void Preprocessor::CachingLex(Token &Result) {
  if (!InCachingLexMode())
    return;

  // The assert in EnterCachingLexMode should prevent this from happening.
  assert(LexLevel == 1 &&
         "should not use token caching within the preprocessor");

  if (CachedLexPos < CachedTokens.size()) {
    Result = CachedTokens[CachedLexPos++];
    Result.setFlag(Token::IsReinjected);
    return;
  }

  ExitCachingLexMode();
  Lex(Result);

  if (isBacktrackEnabled()) {
    // Cache the lexed token.
    EnterCachingLexModeUnchecked();
    CachedTokens.push_back(Result);
    ++CachedLexPos;
    return;
  }

  if (CachedLexPos < CachedTokens.size()) {
    EnterCachingLexModeUnchecked();
  } else {
    // All cached tokens were consumed.
    CachedTokens.clear();
    CachedLexPos = 0;
  }
}

void Preprocessor::EnterCachingLexMode() {
  // The caching layer sits on top of all the other lexers, so it's incorrect
  // to cache tokens while inside a nested lex action. The cached tokens would
  // be retained after returning to the enclosing lex action and, at best,
  // would appear at the wrong position in the token stream.
  assert(LexLevel == 0 &&
         "entered caching lex mode while lexing something else");

  if (InCachingLexMode()) {
    assert(CurLexerKind == CLK_CachingLexer && "Unexpected lexer kind");
    return;
  }

  EnterCachingLexModeUnchecked();
}

void Preprocessor::EnterCachingLexModeUnchecked() {
  assert(CurLexerKind != CLK_CachingLexer && "already in caching lex mode");
  PushIncludeMacroStack();
  CurLexerKind = CLK_CachingLexer;
}
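
// PeekAhead - Lex and cache enough additional tokens so that the N-th token
// past the current replay position is available, and return it. As a small
// worked example (values are illustrative): with CachedTokens holding 4 tokens
// and CachedLexPos == 3, PeekAhead(2) lexes one more token, appends it to the
// cache, and returns it. It is typically reached through the Preprocessor's
// lookahead helpers (e.g. LookAhead()), which fall back to it when the cache
// does not already contain the requested token.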
const Token &Preprocessor::PeekAhead(unsigned N) {
  assert(CachedLexPos + N > CachedTokens.size() && "Confused caching.");
  ExitCachingLexMode();
  for (size_t C = CachedLexPos + N - CachedTokens.size(); C > 0; --C) {
    CachedTokens.push_back(Token());
    Lex(CachedTokens.back());
  }
  EnterCachingLexMode();
  return CachedTokens.back();
}

void Preprocessor::AnnotatePreviousCachedTokens(const Token &Tok) {
  assert(Tok.isAnnotation() && "Expected annotation token");
  assert(CachedLexPos != 0 && "Expected to have some cached tokens");
  assert(CachedTokens[CachedLexPos-1].getLastLoc() == Tok.getAnnotationEndLoc()
         && "The annotation should be until the most recent cached token");

  // Start from the end of the cached tokens list and look for the token
  // that is the beginning of the annotation token.
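  //
  // For example (illustrative token values only): if the cached tokens end
  // with `std`, `::`, `string` and Tok is an annotation token whose location
  // matches the `std` token, the loop below finds that token, erases the
  // following `::` and `string` entries, and stores Tok in its place, leaving
  // CachedLexPos pointing just past the annotation.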
  for (CachedTokensTy::size_type i = CachedLexPos; i != 0; --i) {
    CachedTokensTy::iterator AnnotBegin = CachedTokens.begin() + i-1;
    if (AnnotBegin->getLocation() == Tok.getLocation()) {
      assert((BacktrackPositions.empty() || BacktrackPositions.back() <= i) &&
             "The backtrack pos points inside the annotated tokens!");
      // Replace the cached tokens with the single annotation token.
      if (i < CachedLexPos)
        CachedTokens.erase(AnnotBegin + 1, CachedTokens.begin() + CachedLexPos);
      *AnnotBegin = Tok;
      CachedLexPos = i;
      return;
    }
  }
}
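
// IsPreviousCachedToken - Return true if the most recently cached token (the
// one at CachedLexPos - 1) has the same kind and the same source location as
// Tok.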
bool Preprocessor::IsPreviousCachedToken(const Token &Tok) const {
  // There's currently no cached token...
  if (!CachedLexPos)
    return false;

  const Token LastCachedTok = CachedTokens[CachedLexPos - 1];
  if (LastCachedTok.getKind() != Tok.getKind())
    return false;

  int RelOffset = 0;
  if ((!getSourceManager().isInSameSLocAddrSpace(
          Tok.getLocation(), getLastCachedTokenLocation(), &RelOffset)) ||
      RelOffset)
    return false;

  return true;
}
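
// ReplacePreviousCachedToken - Replace the most recently cached token (the one
// at CachedLexPos - 1) with the sequence NewToks and leave CachedLexPos just
// past the inserted tokens. As a small worked example (values are
// illustrative): with CachedTokens == [a, b, X], CachedLexPos == 3 and
// NewToks == [y, z], the result is CachedTokens == [a, b, y, z] with
// CachedLexPos == 4.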
void Preprocessor::ReplacePreviousCachedToken(ArrayRef<Token> NewToks) {
  assert(CachedLexPos != 0 && "Expected to have some cached tokens");
  CachedTokens.insert(CachedTokens.begin() + CachedLexPos - 1, NewToks.begin(),
                      NewToks.end());
  CachedTokens.erase(CachedTokens.begin() + CachedLexPos - 1 + NewToks.size());
  CachedLexPos += NewToks.size() - 1;
}