2015-12-30 18:24:40 +08:00
|
|
|
//===--- LexerUtils.cpp - clang-tidy---------------------------------------===//
|
|
|
|
//
|
2019-01-19 16:50:56 +08:00
|
|
|
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
|
|
|
|
// See https://llvm.org/LICENSE.txt for license information.
|
|
|
|
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
|
2015-12-30 18:24:40 +08:00
|
|
|
//
|
|
|
|
//===----------------------------------------------------------------------===//
|
|
|
|
|
|
|
|
#include "LexerUtils.h"
|
|
|
|
|
|
|
|
namespace clang {
|
|
|
|
namespace tidy {
|
2016-05-03 10:54:05 +08:00
|
|
|
namespace utils {
|
|
|
|
namespace lexer {
|
2015-12-30 18:24:40 +08:00
|
|
|
|
2018-10-05 22:15:19 +08:00
|
|
|
Token getPreviousToken(SourceLocation Location, const SourceManager &SM,
                       const LangOptions &LangOpts, bool SkipComments) {
  // Scan backwards, one character at a time, until a raw token can be lexed
  // at the beginning-of-token position. If SkipComments is set, comment
  // tokens are passed over. When no token is found before the start of the
  // file, the returned token has kind tok::unknown.
  Token Tok;
  Tok.setKind(tok::unknown);
  Location = Location.getLocWithOffset(-1);
  const SourceLocation StartOfFile =
      SM.getLocForStartOfFile(SM.getFileID(Location));
  for (; Location != StartOfFile; Location = Location.getLocWithOffset(-1)) {
    Location = Lexer::GetBeginningOfToken(Location, SM, LangOpts);
    // getRawToken returns false on success.
    const bool Lexed = !Lexer::getRawToken(Location, Tok, SM, LangOpts);
    if (Lexed && (!SkipComments || !Tok.is(tok::comment)))
      break;
  }
  return Tok;
}
|
|
|
|
|
[clang-tidy] new check 'readability-isolate-declaration'
Summary:
This patch introduces a new clang-tidy check that matches on all `declStmt` that declare more than one variable
and transform them into one statement per declaration if possible.
It currently only focusses on variable declarations but should be extended to cover more kinds of declarations in the future.
It is related to https://reviews.llvm.org/D27621 and uses its extensive test-suite. Thank you to firolino for his work!
Reviewers: rsmith, aaron.ballman, alexfh, hokein, kbobyrev
Reviewed By: aaron.ballman
Subscribers: ZaMaZaN4iK, mgehre, nemanjai, kbarton, lebedev.ri, Eugene.Zelenko, mgorny, xazax.hun, cfe-commits
Tags: #clang-tools-extra
Differential Revision: https://reviews.llvm.org/D51949
llvm-svn: 345735
2018-11-01 00:50:44 +08:00
|
|
|
SourceLocation findPreviousTokenStart(SourceLocation Start,
                                      const SourceManager &SM,
                                      const LangOptions &LangOpts) {
  // Locations inside macro expansions (or invalid ones) have no meaningful
  // character offsets, so the search cannot proceed from them.
  const auto IsUnusable = [](SourceLocation Loc) {
    return Loc.isInvalid() || Loc.isMacroID();
  };

  if (IsUnusable(Start))
    return SourceLocation();

  const SourceLocation BeforeStart = Start.getLocWithOffset(-1);
  if (IsUnusable(BeforeStart))
    return SourceLocation();

  // Snap the one-character-back location to the start of the token it lands in.
  return Lexer::GetBeginningOfToken(BeforeStart, SM, LangOpts);
}
|
|
|
|
|
|
|
|
SourceLocation findPreviousTokenKind(SourceLocation Start,
                                     const SourceManager &SM,
                                     const LangOptions &LangOpts,
                                     tok::TokenKind TK) {
  // Walk tokens backwards from Start until one of kind TK is found; return
  // an invalid SourceLocation if the search runs into a macro, an invalid
  // location, or a lexing failure first.
  for (;;) {
    const SourceLocation TokenStart =
        findPreviousTokenStart(Start, SM, LangOpts);
    if (TokenStart.isInvalid() || TokenStart.isMacroID())
      return SourceLocation();

    Token Tok;
    if (Lexer::getRawToken(TokenStart, Tok, SM, LangOpts,
                           /*IgnoreWhiteSpace=*/true))
      return SourceLocation();

    if (Tok.is(TK))
      return Tok.getLocation();

    Start = TokenStart;
  }
}
|
|
|
|
|
|
|
|
SourceLocation findNextTerminator(SourceLocation Start, const SourceManager &SM,
                                  const LangOptions &LangOpts) {
  // A statement "terminator" here is either a comma or a semicolon.
  return findNextAnyTokenKind(Start, SM, LangOpts, tok::comma, tok::semi);
}
|
|
|
|
|
|
|
|
bool rangeContainsExpansionsOrDirectives(SourceRange Range,
                                         const SourceManager &SM,
                                         const LangOptions &LangOpts) {
  assert(Range.isValid() && "Invalid Range for relexing provided");

  // Walk token-by-token through the range. A macro location or a '#' (the
  // start of a preprocessor directive) makes the range unsafe to relex, as
  // does failing to find a next token at all.
  for (SourceLocation Loc = Range.getBegin(); Loc < Range.getEnd();
       Loc = Lexer::getLocForEndOfToken(Loc, 0, SM, LangOpts)
                 .getLocWithOffset(1)) {
    if (Loc.isMacroID())
      return true;

    const llvm::Optional<Token> Tok = Lexer::findNextToken(Loc, SM, LangOpts);
    if (!Tok || Tok->is(tok::hash))
      return true;
  }

  return false;
}
|
2018-11-01 03:11:38 +08:00
|
|
|
|
|
|
|
llvm::Optional<Token> getConstQualifyingToken(CharSourceRange Range,
                                              const ASTContext &Context,
                                              const SourceManager &SM) {
  // Relex the given range with a raw lexer and track 'const' tokens.
  std::pair<FileID, unsigned> LocInfo = SM.getDecomposedLoc(Range.getBegin());
  StringRef Buffer = SM.getBufferData(LocInfo.first);
  Lexer RawLexer(SM.getLocForStartOfFile(LocInfo.first), Context.getLangOpts(),
                 Buffer.begin(), Buffer.data() + LocInfo.second, Buffer.end());

  llvm::Optional<Token> FirstConst;
  Token LastTok;
  Token Current;
  while (!RawLexer.LexFromRawLexer(Current) &&
         Range.getEnd() != Current.getLocation() &&
         !SM.isBeforeInTranslationUnit(Range.getEnd(), Current.getLocation())) {
    // Raw-mode lexing produces raw identifiers for keywords; resolve them
    // through the identifier table so 'const' is recognized as tok::kw_const.
    if (Current.is(tok::raw_identifier)) {
      IdentifierInfo &Info = Context.Idents.get(StringRef(
          SM.getCharacterData(Current.getLocation()), Current.getLength()));
      Current.setIdentifierInfo(&Info);
      Current.setKind(Info.getTokenID());
    }

    if (Current.is(tok::kw_const) && !FirstConst)
      FirstConst = Current;
    LastTok = Current;
  }

  // If the last token in the range is a `const`, then it const qualifies the
  // type. Otherwise, the first `const` token, if any, is the qualifier.
  return LastTok.is(tok::kw_const) ? LastTok : FirstConst;
}
|
2016-05-03 10:54:05 +08:00
|
|
|
} // namespace lexer
|
|
|
|
} // namespace utils
|
2015-12-30 18:24:40 +08:00
|
|
|
} // namespace tidy
|
|
|
|
} // namespace clang
|