//===--- Parser.cpp - C Language Family Parser ----------------------------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License.  See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
//  This file implements the Parser interfaces.
//
//===----------------------------------------------------------------------===//

#include "clang/Parse/Parser.h"
#include "clang/Parse/ParseDiagnostic.h"
#include "clang/Parse/DeclSpec.h"
#include "clang/Parse/Scope.h"
#include "llvm/Support/raw_ostream.h"
#include "ExtensionRAIIObject.h"
#include "ParsePragma.h"
using namespace clang;

/// \brief A comment handler that passes comments found by the preprocessor
/// to the parser action.
class ActionCommentHandler : public CommentHandler {
  Action &Actions;

public:
  explicit ActionCommentHandler(Action &Actions) : Actions(Actions) { }

  virtual void HandleComment(Preprocessor &PP, SourceRange Comment) {
    Actions.ActOnComment(Comment);
  }
};

Parser::Parser(Preprocessor &pp, Action &actions)
  : CrashInfo(*this), PP(pp), Actions(actions), Diags(PP.getDiagnostics()),
    GreaterThanIsOperator(true) {
  Tok.setKind(tok::eof);
  CurScope = 0;
  NumCachedScopes = 0;
  ParenCount = BracketCount = BraceCount = 0;
  ObjCImpDecl = DeclPtrTy();

  // Add #pragma handlers. These are removed and destroyed in the
  // destructor.
  PackHandler.reset(new
          PragmaPackHandler(&PP.getIdentifierTable().get("pack"), actions));
  PP.AddPragmaHandler(0, PackHandler.get());

  UnusedHandler.reset(new
          PragmaUnusedHandler(&PP.getIdentifierTable().get("unused"), actions,
                              *this));
  PP.AddPragmaHandler(0, UnusedHandler.get());

  WeakHandler.reset(new
          PragmaWeakHandler(&PP.getIdentifierTable().get("weak"), actions));
  PP.AddPragmaHandler(0, WeakHandler.get());

  CommentHandler.reset(new ActionCommentHandler(actions));
  PP.AddCommentHandler(CommentHandler.get());
}

/// If a crash happens while the parser is active, print out a line indicating
/// what the current token is.
void PrettyStackTraceParserEntry::print(llvm::raw_ostream &OS) const {
  const Token &Tok = P.getCurToken();
  if (Tok.is(tok::eof)) {
    OS << "<eof> parser at end of file\n";
    return;
  }

  if (Tok.getLocation().isInvalid()) {
    OS << "<unknown> parser at unknown location\n";
    return;
  }

  const Preprocessor &PP = P.getPreprocessor();
  Tok.getLocation().print(OS, PP.getSourceManager());
  OS << ": current parser token '" << PP.getSpelling(Tok) << "'\n";
}


DiagnosticBuilder Parser::Diag(SourceLocation Loc, unsigned DiagID) {
  return Diags.Report(FullSourceLoc(Loc, PP.getSourceManager()), DiagID);
}

DiagnosticBuilder Parser::Diag(const Token &Tok, unsigned DiagID) {
  return Diag(Tok.getLocation(), DiagID);
}

/// \brief Emits a diagnostic suggesting parentheses surrounding a
/// given range.
///
/// \param Loc The location where we'll emit the diagnostic.
/// \param DK The kind of diagnostic to emit.
/// \param ParenRange Source range enclosing code that should be parenthesized.
void Parser::SuggestParentheses(SourceLocation Loc, unsigned DK,
                                SourceRange ParenRange) {
  SourceLocation EndLoc = PP.getLocForEndOfToken(ParenRange.getEnd());
  if (!ParenRange.getEnd().isFileID() || EndLoc.isInvalid()) {
    // We can't display the parentheses, so just emit the
    // warning/error and return.
    Diag(Loc, DK);
    return;
  }

  Diag(Loc, DK)
    << CodeModificationHint::CreateInsertion(ParenRange.getBegin(), "(")
    << CodeModificationHint::CreateInsertion(EndLoc, ")");
}
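
// Illustrative sketch (not part of the original file; drawn from the use case
// this helper was added for): given C++98 code such as
//   B<1000 >> 2> *b1;
// a caller can pass the source range of "1000 >> 2" so that, alongside the
// warning about '>>' in a template argument, the emitted hints suggest
//   B<(1000 >> 2)> *b1;
// i.e. an insertion of "(" at the range begin and ")" just past its last token.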

/// MatchRHSPunctuation - For punctuation with a LHS and RHS (e.g. '['/']'),
/// this helper function matches and consumes the specified RHS token if
/// present.  If not present, it emits the specified diagnostic indicating
/// that the parser failed to match the RHS of the token at LHSLoc.  LHSName
/// should be the name of the unmatched LHS token.
SourceLocation Parser::MatchRHSPunctuation(tok::TokenKind RHSTok,
                                           SourceLocation LHSLoc) {

  if (Tok.is(RHSTok))
    return ConsumeAnyToken();

  SourceLocation R = Tok.getLocation();
  const char *LHSName = "unknown";
  diag::kind DID = diag::err_parse_error;
  switch (RHSTok) {
  default: break;
  case tok::r_paren : LHSName = "("; DID = diag::err_expected_rparen; break;
  case tok::r_brace : LHSName = "{"; DID = diag::err_expected_rbrace; break;
  case tok::r_square: LHSName = "["; DID = diag::err_expected_rsquare; break;
  case tok::greater:  LHSName = "<"; DID = diag::err_expected_greater; break;
  }
  Diag(Tok, DID);
  Diag(LHSLoc, diag::note_matching) << LHSName;
  SkipUntil(RHSTok);
  return R;
}
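
// Illustrative sketch (assumed usage, not from the original file): a caller
// typically remembers where the opening token was and hands that location to
// MatchRHSPunctuation once the construct ends, e.g.
//   SourceLocation LParenLoc = ConsumeParen();   // eat '('
//   ...parse whatever belongs inside the parens...
//   SourceLocation RParenLoc = MatchRHSPunctuation(tok::r_paren, LParenLoc);
// On a mismatch this emits err_expected_rparen plus a note_matching note at
// the '(' and skips ahead, so the caller can keep parsing.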

/// ExpectAndConsume - The parser expects that 'ExpectedTok' is next in the
/// input.  If so, it is consumed and false is returned.
///
/// If the input is malformed, this emits the specified diagnostic.  Next, if
/// SkipToTok is specified, it calls SkipUntil(SkipToTok).  Finally, true is
/// returned.
bool Parser::ExpectAndConsume(tok::TokenKind ExpectedTok, unsigned DiagID,
                              const char *Msg, tok::TokenKind SkipToTok) {
  if (Tok.is(ExpectedTok)) {
    ConsumeAnyToken();
    return false;
  }

  const char *Spelling = 0;
  SourceLocation EndLoc = PP.getLocForEndOfToken(PrevTokLocation);
  if (EndLoc.isValid() &&
      (Spelling = tok::getTokenSimpleSpelling(ExpectedTok))) {
    // Show what code to insert to fix this problem.
    Diag(EndLoc, DiagID)
      << Msg
      << CodeModificationHint::CreateInsertion(EndLoc, Spelling);
  } else
    Diag(Tok, DiagID) << Msg;

  if (SkipToTok != tok::unknown)
    SkipUntil(SkipToTok);
  return true;
}
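
// Illustrative sketch (assumed usage, not from the original file): callers
// read the boolean result as "true means the expected token was missing", e.g.
//   if (ExpectAndConsume(tok::semi, diag::err_expected_semi_after,
//                        "top-level asm block"))
//     ;  // the diagnostic (and any fix-it insertion) has already been emitted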

//===----------------------------------------------------------------------===//
// Error recovery.
//===----------------------------------------------------------------------===//

/// SkipUntil - Read tokens until we get to the specified token, then consume
/// it (unless DontConsume is true).  Because we cannot guarantee that the
/// token will ever occur, this skips to the next token, or to some likely
/// good stopping point.  If StopAtSemi is true, skipping will stop at a ';'
/// character.
///
/// If SkipUntil finds the specified token, it returns true, otherwise it
/// returns false.
bool Parser::SkipUntil(const tok::TokenKind *Toks, unsigned NumToks,
                       bool StopAtSemi, bool DontConsume) {
  // We always want this function to skip at least one token if the first token
  // isn't T and if not at EOF.
  bool isFirstTokenSkipped = true;
  while (1) {
    // If we found one of the tokens, stop and return true.
    for (unsigned i = 0; i != NumToks; ++i) {
      if (Tok.is(Toks[i])) {
        if (DontConsume) {
          // Noop, don't consume the token.
        } else {
          ConsumeAnyToken();
        }
        return true;
      }
    }

    switch (Tok.getKind()) {
    case tok::eof:
      // Ran out of tokens.
      return false;

    case tok::l_paren:
      // Recursively skip properly-nested parens.
      ConsumeParen();
      SkipUntil(tok::r_paren, false);
      break;
    case tok::l_square:
      // Recursively skip properly-nested square brackets.
      ConsumeBracket();
      SkipUntil(tok::r_square, false);
      break;
    case tok::l_brace:
      // Recursively skip properly-nested braces.
      ConsumeBrace();
      SkipUntil(tok::r_brace, false);
      break;

    // Okay, we found a ']' or '}' or ')', which we think should be balanced.
    // Since the user wasn't looking for this token (if they were, it would
    // already be handled), this isn't balanced.  If there is a LHS token at a
    // higher level, we will assume that this matches the unbalanced token
    // and return it.  Otherwise, this is a spurious RHS token, which we skip.
    case tok::r_paren:
      if (ParenCount && !isFirstTokenSkipped)
        return false;  // Matches something.
      ConsumeParen();
      break;
    case tok::r_square:
      if (BracketCount && !isFirstTokenSkipped)
        return false;  // Matches something.
      ConsumeBracket();
      break;
    case tok::r_brace:
      if (BraceCount && !isFirstTokenSkipped)
        return false;  // Matches something.
      ConsumeBrace();
      break;

    case tok::string_literal:
    case tok::wide_string_literal:
      ConsumeStringToken();
      break;
    case tok::semi:
      if (StopAtSemi)
        return false;
      // FALL THROUGH.
    default:
      // Skip this token.
      ConsumeToken();
      break;
    }
    isFirstTokenSkipped = false;
  }
}
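
// Illustrative note (assumption, not in the original file): the single-token
// form used throughout this file, e.g. SkipUntil(tok::r_paren), forwards to
// the array overload above.  It skips tokens until the next ')' at the current
// nesting level (subject to the overload's StopAtSemi/DontConsume defaults),
// stepping over properly nested (), [] and {} groups via the recursive calls
// above.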

//===----------------------------------------------------------------------===//
// Scope manipulation
//===----------------------------------------------------------------------===//

/// EnterScope - Start a new scope.
void Parser::EnterScope(unsigned ScopeFlags) {
  if (NumCachedScopes) {
    Scope *N = ScopeCache[--NumCachedScopes];
    N->Init(CurScope, ScopeFlags);
    CurScope = N;
  } else {
    CurScope = new Scope(CurScope, ScopeFlags);
  }
}

/// ExitScope - Pop a scope off the scope stack.
void Parser::ExitScope() {
  assert(CurScope && "Scope imbalance!");

  // Inform the actions module that this scope is going away if there are any
  // decls in it.
  if (!CurScope->decl_empty())
    Actions.ActOnPopScope(Tok.getLocation(), CurScope);

  Scope *OldScope = CurScope;
  CurScope = OldScope->getParent();

  if (NumCachedScopes == ScopeCacheSize)
    delete OldScope;
  else
    ScopeCache[NumCachedScopes++] = OldScope;
}
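
// Illustrative note (assumption, not in the original file): most callers do
// not pair EnterScope/ExitScope by hand; the ParseScope RAII helper used later
// in this file (e.g. the BodyScope in ParseFunctionDefinition) enters the
// scope in its constructor and exits it on destruction, so early returns
// cannot leave a scope on the stack.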



//===----------------------------------------------------------------------===//
// C99 6.9: External Definitions.
//===----------------------------------------------------------------------===//

Parser::~Parser() {
  // If we still have scopes active, delete the scope tree.
  delete CurScope;

  // Free the scope cache.
  for (unsigned i = 0, e = NumCachedScopes; i != e; ++i)
    delete ScopeCache[i];

  // Remove the pragma handlers we installed.
  PP.RemovePragmaHandler(0, PackHandler.get());
  PackHandler.reset();
  PP.RemovePragmaHandler(0, UnusedHandler.get());
  UnusedHandler.reset();
  PP.RemovePragmaHandler(0, WeakHandler.get());
  WeakHandler.reset();
  PP.RemoveCommentHandler(CommentHandler.get());
}

/// Initialize - Warm up the parser.
///
void Parser::Initialize() {
  // Prime the lexer look-ahead.
  ConsumeToken();

  // Create the translation unit scope.  Install it as the current scope.
  assert(CurScope == 0 && "A scope is already active?");
  EnterScope(Scope::DeclScope);
  Actions.ActOnTranslationUnitScope(Tok.getLocation(), CurScope);

  if (Tok.is(tok::eof) &&
      !getLang().CPlusPlus)  // Empty source file is an extension in C
    Diag(Tok, diag::ext_empty_source_file);

  // Initialization for Objective-C context sensitive keywords recognition.
  // Referenced in Parser::ParseObjCTypeQualifierList.
  if (getLang().ObjC1) {
    ObjCTypeQuals[objc_in] = &PP.getIdentifierTable().get("in");
    ObjCTypeQuals[objc_out] = &PP.getIdentifierTable().get("out");
    ObjCTypeQuals[objc_inout] = &PP.getIdentifierTable().get("inout");
    ObjCTypeQuals[objc_oneway] = &PP.getIdentifierTable().get("oneway");
    ObjCTypeQuals[objc_bycopy] = &PP.getIdentifierTable().get("bycopy");
    ObjCTypeQuals[objc_byref] = &PP.getIdentifierTable().get("byref");
  }

  Ident_super = &PP.getIdentifierTable().get("super");
}

/// ParseTopLevelDecl - Parse one top-level declaration, return whatever the
/// action tells us to.  This returns true if the EOF was encountered.
bool Parser::ParseTopLevelDecl(DeclGroupPtrTy &Result) {
  Result = DeclGroupPtrTy();
  if (Tok.is(tok::eof)) {
    Actions.ActOnEndOfTranslationUnit();
    return true;
  }

  Result = ParseExternalDeclaration();
  return false;
}

/// ParseTranslationUnit:
///       translation-unit: [C99 6.9]
///         external-declaration
///         translation-unit external-declaration
void Parser::ParseTranslationUnit() {
  Initialize();

  DeclGroupPtrTy Res;
  while (!ParseTopLevelDecl(Res))
    /*parse them all*/;

  ExitScope();
  assert(CurScope == 0 && "Scope imbalance!");
}

/// ParseExternalDeclaration:
///
///       external-declaration: [C99 6.9], declaration: [C++ dcl.dcl]
///         function-definition
///         declaration
/// [EXT]   ';'
/// [GNU]   asm-definition
/// [GNU]   __extension__ external-declaration
/// [OBJC]  objc-class-definition
/// [OBJC]  objc-class-declaration
/// [OBJC]  objc-alias-declaration
/// [OBJC]  objc-protocol-definition
/// [OBJC]  objc-method-definition
/// [OBJC]  @end
/// [C++]   linkage-specification
///
/// [GNU] asm-definition:
///         simple-asm-expr ';'
///
Parser::DeclGroupPtrTy Parser::ParseExternalDeclaration() {
  DeclPtrTy SingleDecl;
  switch (Tok.getKind()) {
  case tok::semi:
    Diag(Tok, diag::ext_top_level_semi)
      << CodeModificationHint::CreateRemoval(SourceRange(Tok.getLocation()));
    ConsumeToken();
    // TODO: Invoke action for top-level semicolon.
    return DeclGroupPtrTy();
  case tok::r_brace:
    Diag(Tok, diag::err_expected_external_declaration);
    ConsumeBrace();
    return DeclGroupPtrTy();
  case tok::eof:
    Diag(Tok, diag::err_expected_external_declaration);
    return DeclGroupPtrTy();
  case tok::kw___extension__: {
    // __extension__ silences extension warnings in the subexpression.
    ExtensionRAIIObject O(Diags);  // Use RAII to do this.
    ConsumeToken();
    return ParseExternalDeclaration();
  }
  case tok::kw_asm: {
    OwningExprResult Result(ParseSimpleAsm());

    ExpectAndConsume(tok::semi, diag::err_expected_semi_after,
                     "top-level asm block");

    if (Result.isInvalid())
      return DeclGroupPtrTy();
    SingleDecl = Actions.ActOnFileScopeAsmDecl(Tok.getLocation(), move(Result));
    break;
  }
  case tok::at:
    // @ is not a legal token unless objc is enabled, no need to check for ObjC.
    /// FIXME: ParseObjCAtDirectives should return a DeclGroup for things like
    /// @class foo, bar;
    SingleDecl = ParseObjCAtDirectives();
    break;
  case tok::minus:
  case tok::plus:
    if (!getLang().ObjC1) {
      Diag(Tok, diag::err_expected_external_declaration);
      ConsumeToken();
      return DeclGroupPtrTy();
    }
    SingleDecl = ParseObjCMethodDefinition();
    break;
  case tok::kw_using:
  case tok::kw_namespace:
  case tok::kw_typedef:
  case tok::kw_template:
  case tok::kw_export:    // As in 'export template'
  case tok::kw_static_assert:
    // A function definition cannot start with any of these keywords.
    {
      SourceLocation DeclEnd;
      return ParseDeclaration(Declarator::FileContext, DeclEnd);
    }
  default:
    // We can't tell whether this is a function-definition or declaration yet.
    return ParseDeclarationOrFunctionDefinition();
  }

  // This routine returns a DeclGroup; if the thing we parsed only contains a
  // single decl, convert it now.
  return Actions.ConvertDeclToDeclGroup(SingleDecl);
}

/// \brief Determine whether the current token, if it occurs after a
/// declarator, continues a declaration or declaration list.
bool Parser::isDeclarationAfterDeclarator() {
  return Tok.is(tok::equal) ||      // int X()=  -> not a function def
    Tok.is(tok::comma) ||           // int X(),  -> not a function def
    Tok.is(tok::semi) ||            // int X();  -> not a function def
    Tok.is(tok::kw_asm) ||          // int X() __asm__ -> not a function def
    Tok.is(tok::kw___attribute) ||  // int X() __attr__ -> not a function def
    (getLang().CPlusPlus &&
     Tok.is(tok::l_paren));         // int X(0) -> not a function def [C++]
}

/// \brief Determine whether the current token, if it occurs after a
/// declarator, indicates the start of a function definition.
bool Parser::isStartOfFunctionDefinition() {
  return Tok.is(tok::l_brace) ||    // int X() {}
    (!getLang().CPlusPlus &&
     isDeclarationSpecifier()) ||   // int X(f) int f; {}
    (getLang().CPlusPlus &&
     (Tok.is(tok::colon) ||         // X() : Base() {} (used for ctors)
      Tok.is(tok::kw_try)));        // X() try { ... }
}
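
// Illustrative note (assumption, not in the original file): after parsing the
// declarator "int X()", a following '=', ',', ';', '__asm__' or
// '__attribute__' makes isDeclarationAfterDeclarator() treat it as a
// declaration, while a following '{' (or, in C++, ':' or 'try') makes
// isStartOfFunctionDefinition() treat it as the start of a definition.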

/// ParseDeclarationOrFunctionDefinition - Parse either a function-definition
/// or a declaration.  We can't tell which we have until we read up to the
/// compound-statement in function-definition.  TemplateParams, if
/// non-NULL, provides the template parameters when we're parsing a
/// C++ template-declaration.
///
///       function-definition: [C99 6.9.1]
///         decl-specs      declarator declaration-list[opt] compound-statement
/// [C90] function-definition: [C99 6.7.1] - implicit int result
/// [C90]   decl-specs[opt] declarator declaration-list[opt] compound-statement
///
///       declaration: [C99 6.7]
///         declaration-specifiers init-declarator-list[opt] ';'
/// [!C99]  init-declarator-list ';'                  [TODO: warn in c99 mode]
/// [OMP]   threadprivate-directive                   [TODO]
///
Parser::DeclGroupPtrTy
Parser::ParseDeclarationOrFunctionDefinition(AccessSpecifier AS) {
  // Parse the common declaration-specifiers piece.
  DeclSpec DS;
  ParseDeclarationSpecifiers(DS, ParsedTemplateInfo(), AS);

  // C99 6.7.2.3p6: Handle "struct-or-union identifier;", "enum { X };"
  // declaration-specifiers init-declarator-list[opt] ';'
  if (Tok.is(tok::semi)) {
    ConsumeToken();
    DeclPtrTy TheDecl = Actions.ParsedFreeStandingDeclSpec(CurScope, DS);
    return Actions.ConvertDeclToDeclGroup(TheDecl);
  }

  // ObjC2 allows prefix attributes on class interfaces and protocols.
  // FIXME: This still needs better diagnostics.  We should only accept
  // attributes here, no types, etc.
  if (getLang().ObjC2 && Tok.is(tok::at)) {
    SourceLocation AtLoc = ConsumeToken();  // the "@"
    if (!Tok.isObjCAtKeyword(tok::objc_interface) &&
        !Tok.isObjCAtKeyword(tok::objc_protocol)) {
      Diag(Tok, diag::err_objc_unexpected_attr);
      SkipUntil(tok::semi);  // FIXME: better skip?
      return DeclGroupPtrTy();
    }
    const char *PrevSpec = 0;
    if (DS.SetTypeSpecType(DeclSpec::TST_unspecified, AtLoc, PrevSpec))
      Diag(AtLoc, diag::err_invalid_decl_spec_combination) << PrevSpec;

    DeclPtrTy TheDecl;
    if (Tok.isObjCAtKeyword(tok::objc_protocol))
      TheDecl = ParseObjCAtProtocolDeclaration(AtLoc, DS.getAttributes());
    else
      TheDecl = ParseObjCAtInterfaceDeclaration(AtLoc, DS.getAttributes());
    return Actions.ConvertDeclToDeclGroup(TheDecl);
  }

  // If the declspec consisted only of 'extern' and we have a string
  // literal following it, this must be a C++ linkage specifier like
  // 'extern "C"'.
  if (Tok.is(tok::string_literal) && getLang().CPlusPlus &&
      DS.getStorageClassSpec() == DeclSpec::SCS_extern &&
      DS.getParsedSpecifiers() == DeclSpec::PQ_StorageClassSpecifier) {
    DeclPtrTy TheDecl = ParseLinkage(Declarator::FileContext);
    return Actions.ConvertDeclToDeclGroup(TheDecl);
  }

  // Parse the first declarator.
  Declarator DeclaratorInfo(DS, Declarator::FileContext);
  ParseDeclarator(DeclaratorInfo);
  // Error parsing the declarator?
  if (!DeclaratorInfo.hasName()) {
    // If so, skip until the semicolon or a }.
    SkipUntil(tok::r_brace, true, true);
    if (Tok.is(tok::semi))
      ConsumeToken();
    return DeclGroupPtrTy();
  }

  // If we have a declaration or declarator list, handle it.
  if (isDeclarationAfterDeclarator()) {
    // Parse the init-declarator-list for a normal declaration.
    DeclGroupPtrTy DG =
      ParseInitDeclaratorListAfterFirstDeclarator(DeclaratorInfo);
    // Eat the semicolon after the declaration.
    ExpectAndConsume(tok::semi, diag::err_expected_semi_declation);
    return DG;
  }

  if (DeclaratorInfo.isFunctionDeclarator() &&
      isStartOfFunctionDefinition()) {
    if (DS.getStorageClassSpec() == DeclSpec::SCS_typedef) {
      Diag(Tok, diag::err_function_declared_typedef);

      if (Tok.is(tok::l_brace)) {
        // This recovery skips the entire function body.  It would be nice
        // to simply call ParseFunctionDefinition() below, however Sema
        // assumes the declarator represents a function, not a typedef.
        ConsumeBrace();
        SkipUntil(tok::r_brace, true);
      } else {
        SkipUntil(tok::semi);
      }
      return DeclGroupPtrTy();
    }
    DeclPtrTy TheDecl = ParseFunctionDefinition(DeclaratorInfo);
    return Actions.ConvertDeclToDeclGroup(TheDecl);
  }

  if (DeclaratorInfo.isFunctionDeclarator())
    Diag(Tok, diag::err_expected_fn_body);
  else
    Diag(Tok, diag::err_invalid_token_after_toplevel_declarator);
  SkipUntil(tok::semi);
  return DeclGroupPtrTy();
}

/// ParseFunctionDefinition - We parsed and verified that the specified
/// Declarator is well formed.  If this is a K&R-style function, read the
/// parameters declaration-list, then start the compound-statement.
///
///       function-definition: [C99 6.9.1]
///         decl-specs      declarator declaration-list[opt] compound-statement
/// [C90] function-definition: [C99 6.7.1] - implicit int result
/// [C90]   decl-specs[opt] declarator declaration-list[opt] compound-statement
/// [C++] function-definition: [C++ 8.4]
///         decl-specifier-seq[opt] declarator ctor-initializer[opt]
///         function-body
/// [C++] function-definition: [C++ 8.4]
///         decl-specifier-seq[opt] declarator function-try-block
///
Parser::DeclPtrTy Parser::ParseFunctionDefinition(Declarator &D,
                                     const ParsedTemplateInfo &TemplateInfo) {
  const DeclaratorChunk &FnTypeInfo = D.getTypeObject(0);
  assert(FnTypeInfo.Kind == DeclaratorChunk::Function &&
         "This isn't a function declarator!");
  const DeclaratorChunk::FunctionTypeInfo &FTI = FnTypeInfo.Fun;

  // If this is C90 and the declspecs were completely missing, fudge in an
  // implicit int.  We do this here because this is the only place where
  // declaration-specifiers are completely optional in the grammar.
  if (getLang().ImplicitInt && D.getDeclSpec().isEmpty()) {
    const char *PrevSpec;
    D.getMutableDeclSpec().SetTypeSpecType(DeclSpec::TST_int,
                                           D.getIdentifierLoc(),
                                           PrevSpec);
    D.SetRangeBegin(D.getDeclSpec().getSourceRange().getBegin());
  }

  // If this declaration was formed with a K&R-style identifier list for the
  // arguments, parse declarations for all of the args next.
  // int foo(a,b) int a; float b; {}
  if (!FTI.hasPrototype && FTI.NumArgs != 0)
    ParseKNRParamDeclarations(D);

  // We should have an opening brace now.  In C++, a constructor may instead
  // have a colon (ctor-initializer), and a function-try-block begins with
  // 'try'.
  if (Tok.isNot(tok::l_brace) && Tok.isNot(tok::colon) &&
      Tok.isNot(tok::kw_try)) {
    Diag(Tok, diag::err_expected_fn_body);

    // Skip over garbage, until we get to '{'.  Don't eat the '{'.
    SkipUntil(tok::l_brace, true, true);

    // If we didn't find the '{', bail out.
    if (Tok.isNot(tok::l_brace))
      return DeclPtrTy();
  }

  // Enter a scope for the function body.
  ParseScope BodyScope(this, Scope::FnScope|Scope::DeclScope);

  // Tell the actions module that we have entered a function definition with
  // the specified Declarator for the function.
  DeclPtrTy Res = TemplateInfo.TemplateParams?
      Actions.ActOnStartOfFunctionTemplateDef(CurScope,
                         Action::MultiTemplateParamsArg(Actions,
                                        TemplateInfo.TemplateParams->data(),
                                        TemplateInfo.TemplateParams->size()),
                                              D)
    : Actions.ActOnStartOfFunctionDef(CurScope, D);

  if (Tok.is(tok::kw_try))
    return ParseFunctionTryBlock(Res);

  // If we have a colon, then we're probably parsing a C++
  // ctor-initializer.
  if (Tok.is(tok::colon))
    ParseConstructorInitializer(Res);
  else
    Actions.ActOnDefaultCtorInitializers(Res);

  return ParseFunctionStatementBody(Res);
}

/// ParseKNRParamDeclarations - Parse 'declaration-list[opt]' which provides
/// types for a function with a K&R-style identifier list for arguments.
void Parser::ParseKNRParamDeclarations(Declarator &D) {
  // We know that the top-level of this declarator is a function.
  DeclaratorChunk::FunctionTypeInfo &FTI = D.getTypeObject(0).Fun;

  // Enter function-declaration scope, limiting any declarators to the
  // function prototype scope, including parameter declarators.
  ParseScope PrototypeScope(this,
                            Scope::FunctionPrototypeScope|Scope::DeclScope);

  // Read all the argument declarations.
  while (isDeclarationSpecifier()) {
    SourceLocation DSStart = Tok.getLocation();

    // Parse the common declaration-specifiers piece.
    DeclSpec DS;
    ParseDeclarationSpecifiers(DS);

    // C99 6.9.1p6: 'each declaration in the declaration list shall have at
    // least one declarator'.
    // NOTE: GCC just makes this an ext-warn.  It's not clear what it does with
    // the declarations though.  It's trivial to ignore them, really hard to do
    // anything else with them.
    if (Tok.is(tok::semi)) {
      Diag(DSStart, diag::err_declaration_does_not_declare_param);
      ConsumeToken();
      continue;
    }

    // C99 6.9.1p6: Declarations shall contain no storage-class specifiers
    // other than register.
    if (DS.getStorageClassSpec() != DeclSpec::SCS_unspecified &&
        DS.getStorageClassSpec() != DeclSpec::SCS_register) {
      Diag(DS.getStorageClassSpecLoc(),
           diag::err_invalid_storage_class_in_func_decl);
      DS.ClearStorageClassSpecs();
    }
    if (DS.isThreadSpecified()) {
      Diag(DS.getThreadSpecLoc(),
           diag::err_invalid_storage_class_in_func_decl);
      DS.ClearStorageClassSpecs();
    }

    // Parse the first declarator attached to this declspec.
    Declarator ParmDeclarator(DS, Declarator::KNRTypeListContext);
    ParseDeclarator(ParmDeclarator);

    // Handle the full declarator list.
    while (1) {
      Action::AttrTy *AttrList;
      // If attributes are present, parse them.
      if (Tok.is(tok::kw___attribute))
        // FIXME: attach attributes too.
        AttrList = ParseAttributes();

      // Ask the actions module to compute the type for this declarator.
      Action::DeclPtrTy Param =
        Actions.ActOnParamDeclarator(CurScope, ParmDeclarator);

      if (Param &&
          // A missing identifier has already been diagnosed.
          ParmDeclarator.getIdentifier()) {

        // Scan the argument list looking for the correct param to apply this
        // type.
        for (unsigned i = 0; ; ++i) {
          // C99 6.9.1p6: those declarators shall declare only identifiers from
          // the identifier list.
          if (i == FTI.NumArgs) {
            Diag(ParmDeclarator.getIdentifierLoc(), diag::err_no_matching_param)
              << ParmDeclarator.getIdentifier();
            break;
          }

          if (FTI.ArgInfo[i].Ident == ParmDeclarator.getIdentifier()) {
            // Reject redefinitions of parameters.
            if (FTI.ArgInfo[i].Param) {
              Diag(ParmDeclarator.getIdentifierLoc(),
                   diag::err_param_redefinition)
                << ParmDeclarator.getIdentifier();
            } else {
              FTI.ArgInfo[i].Param = Param;
            }
            break;
          }
        }
      }

      // If we don't have a comma, it is either the end of the list (a ';') or
      // an error, bail out.
      if (Tok.isNot(tok::comma))
        break;

      // Consume the comma.
      ConsumeToken();

      // Parse the next declarator.
      ParmDeclarator.clear();
      ParseDeclarator(ParmDeclarator);
    }

    if (Tok.is(tok::semi)) {
      ConsumeToken();
    } else {
      Diag(Tok, diag::err_parse_error);
      // Skip to end of block or statement
      SkipUntil(tok::semi, true);
      if (Tok.is(tok::semi))
        ConsumeToken();
    }
  }

  // The actions module must verify that all arguments were declared.
  Actions.ActOnFinishKNRParamDeclarations(CurScope, D, Tok.getLocation());
}
2006-08-15 14:03:28 +08:00
|
|
|
/// ParseAsmStringLiteral - This is just a normal string-literal, but is not
/// allowed to be a wide string, and is not subject to character translation.
///
/// [GNU] asm-string-literal:
///         string-literal
///
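/// For example, the string literal inside a GNU asm construct (an
/// illustrative snippet only):
///
///   asm("nop");
///
/// is parsed by this routine; a wide string such as L"nop" is not allowed
/// here.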
Parser::OwningExprResult Parser::ParseAsmStringLiteral() {
  if (!isTokenStringLiteral()) {
    Diag(Tok, diag::err_expected_string_literal);
    return ExprError();
  }

  OwningExprResult Res(ParseStringLiteralExpression());
  if (Res.isInvalid()) return move(Res);

  // TODO: Diagnose: wide string literal in 'asm'

  return move(Res);
}

/// ParseSimpleAsm
///
/// [GNU] simple-asm-expr:
///         'asm' '(' asm-string-literal ')'
///
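/// A simple-asm-expr appears, for example, as a GNU asm label on a
/// declaration (an illustrative snippet only):
///
///   int counter asm("hw_counter");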
Parser::OwningExprResult Parser::ParseSimpleAsm(SourceLocation *EndLoc) {
  assert(Tok.is(tok::kw_asm) && "Not an asm!");
  SourceLocation Loc = ConsumeToken();

  if (Tok.isNot(tok::l_paren)) {
    Diag(Tok, diag::err_expected_lparen_after) << "asm";
    return ExprError();
  }

  Loc = ConsumeParen();

  OwningExprResult Result(ParseAsmStringLiteral());

  if (Result.isInvalid()) {
    SkipUntil(tok::r_paren, true, true);
    if (EndLoc)
      *EndLoc = Tok.getLocation();
    ConsumeAnyToken();
  } else {
    Loc = MatchRHSPunctuation(tok::r_paren, Loc);
    if (EndLoc)
      *EndLoc = Loc;
  }

  return move(Result);
}

/// TryAnnotateTypeOrScopeToken - If the current token position is on a
/// typename (possibly qualified in C++) or a C++ scope specifier not followed
/// by a typename, TryAnnotateTypeOrScopeToken will replace one or more tokens
/// with a single annotation token representing the typename or C++ scope
/// respectively.
/// This simplifies handling of C++ scope specifiers and allows efficient
/// backtracking without the need to re-parse and resolve nested-names and
/// typenames.
/// It will mainly be called when we expect to treat identifiers as typenames
/// (if they are typenames). For example, in C we do not expect identifiers
/// inside expressions to be treated as typenames, so it will not be called
/// for expressions in C.
/// The benefit for C/ObjC is that a typename will be annotated and
/// Actions.getTypeName will not need to be called again (e.g. getTypeName
/// will not be called twice, once to check whether we have a declaration
/// specifier, and another one to get the actual type inside
/// ParseDeclarationSpecifiers).
///
/// This returns true if the token was annotated or an unrecoverable error
/// occurs.
///
/// Note that this routine emits an error if you call it with ::new or ::delete
/// as the current tokens, so only call it in contexts where these are invalid.
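/// For example (an illustrative sketch of the transformation, not a grammar
/// rule): when the tokens
///
///   std :: vector < int >
///
/// appear where a type is expected, this routine folds them into a single
/// annot_typename token carrying the resolved type, so later backtracking
/// does not need to re-parse and re-resolve the nested-name-specifier.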
bool Parser::TryAnnotateTypeOrScopeToken() {
  assert((Tok.is(tok::identifier) || Tok.is(tok::coloncolon)
          || Tok.is(tok::kw_typename)) &&
         "Cannot be a type or scope token!");

  if (Tok.is(tok::kw_typename)) {
    // Parse a C++ typename-specifier, e.g., "typename T::type".
    //
    //   typename-specifier:
    //     'typename' '::' [opt] nested-name-specifier identifier
    //     'typename' '::' [opt] nested-name-specifier template [opt]
    //            simple-template-id
    SourceLocation TypenameLoc = ConsumeToken();
    CXXScopeSpec SS;
    bool HadNestedNameSpecifier = ParseOptionalCXXScopeSpecifier(SS);
    if (!HadNestedNameSpecifier) {
      Diag(Tok.getLocation(), diag::err_expected_qualified_after_typename);
      return false;
    }

    TypeResult Ty;
    if (Tok.is(tok::identifier)) {
      // FIXME: check whether the next token is '<', first!
      Ty = Actions.ActOnTypenameType(TypenameLoc, SS, *Tok.getIdentifierInfo(),
                                     Tok.getLocation());
    } else if (Tok.is(tok::annot_template_id)) {
      TemplateIdAnnotation *TemplateId
        = static_cast<TemplateIdAnnotation *>(Tok.getAnnotationValue());
      if (TemplateId->Kind == TNK_Function_template) {
        Diag(Tok, diag::err_typename_refers_to_non_type_template)
          << Tok.getAnnotationRange();
        return false;
      }

      AnnotateTemplateIdTokenAsType(0);
      assert(Tok.is(tok::annot_typename) &&
             "AnnotateTemplateIdTokenAsType isn't working properly");
      if (Tok.getAnnotationValue())
        Ty = Actions.ActOnTypenameType(TypenameLoc, SS, SourceLocation(),
                                       Tok.getAnnotationValue());
      else
        Ty = true;
    } else {
      Diag(Tok, diag::err_expected_type_name_after_typename)
        << SS.getRange();
      return false;
    }

    Tok.setKind(tok::annot_typename);
    Tok.setAnnotationValue(Ty.isInvalid() ? 0 : Ty.get());
    Tok.setAnnotationEndLoc(Tok.getLocation());
    Tok.setLocation(TypenameLoc);
    PP.AnnotateCachedTokens(Tok);
    return true;
  }

  CXXScopeSpec SS;
  if (getLang().CPlusPlus)
    ParseOptionalCXXScopeSpecifier(SS);

  if (Tok.is(tok::identifier)) {
    // Determine whether the identifier is a type name.
    if (TypeTy *Ty = Actions.getTypeName(*Tok.getIdentifierInfo(),
                                         Tok.getLocation(), CurScope, &SS)) {
      // This is a typename. Replace the current token in-place with an
      // annotation type token.
      Tok.setKind(tok::annot_typename);
      Tok.setAnnotationValue(Ty);
      Tok.setAnnotationEndLoc(Tok.getLocation());
      if (SS.isNotEmpty()) // it was a C++ qualified type name.
        Tok.setLocation(SS.getBeginLoc());

      // In case the tokens were cached, have Preprocessor replace
      // them with the annotation token.
      PP.AnnotateCachedTokens(Tok);
      return true;
    }

    if (!getLang().CPlusPlus) {
      // If we're in C, we can't have :: tokens at all (the lexer won't return
      // them).  If the identifier is not a type, then it can't be a scope
      // either; just early exit.
      return false;
    }

    // If this is a template-id, annotate with a template-id or type token.
    if (NextToken().is(tok::less)) {
      TemplateTy Template;
      if (TemplateNameKind TNK
            = Actions.isTemplateName(*Tok.getIdentifierInfo(),
                                     CurScope, Template, &SS))
        if (AnnotateTemplateIdToken(Template, TNK, &SS)) {
          // If an unrecoverable error occurred, we need to return true here,
          // because the token stream is in a damaged state.  We may not return
          // a valid identifier.
          return Tok.isNot(tok::identifier);
        }
    }

    // The current token, which is either an identifier or a
    // template-id, is not part of the annotation. Fall through to
    // push that token back into the stream and complete the C++ scope
    // specifier annotation.
  }

  if (Tok.is(tok::annot_template_id)) {
    TemplateIdAnnotation *TemplateId
      = static_cast<TemplateIdAnnotation *>(Tok.getAnnotationValue());
    if (TemplateId->Kind == TNK_Type_template) {
      // A template-id that refers to a type was parsed into a
      // template-id annotation in a context where we weren't allowed
      // to produce a type annotation token. Update the template-id
      // annotation token to a type annotation token now.
      AnnotateTemplateIdTokenAsType(&SS);
      return true;
    }
  }

  if (SS.isEmpty())
    return Tok.isNot(tok::identifier) && Tok.isNot(tok::coloncolon);

  // A C++ scope specifier that isn't followed by a typename.
  // Push the current token back into the token stream (or revert it if it is
  // cached) and use an annotation scope token for the current token.
  if (PP.isBacktrackEnabled())
    PP.RevertCachedTokens(1);
  else
    PP.EnterToken(Tok);
  Tok.setKind(tok::annot_cxxscope);
  Tok.setAnnotationValue(SS.getScopeRep());
  Tok.setAnnotationRange(SS.getRange());

  // In case the tokens were cached, have Preprocessor replace them with the
  // annotation token.
  PP.AnnotateCachedTokens(Tok);
  return true;
}

/// TryAnnotateCXXScopeToken - Like TryAnnotateTypeOrScopeToken but only
/// annotates C++ scope specifiers and template-ids. This returns
/// true if the token was annotated or there was an error that could not be
/// recovered from.
///
/// Note that this routine emits an error if you call it with ::new or ::delete
/// as the current tokens, so only call it in contexts where these are invalid.
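/// For example (an illustrative sketch): given "A::B::x" where A::B names a
/// class and x is not a type, the tokens for "A::B::" are replaced with a
/// single annot_cxxscope token, leaving "x" as the token that follows it.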
bool Parser::TryAnnotateCXXScopeToken() {
  assert(getLang().CPlusPlus &&
         "Call sites of this function should be guarded by checking for C++");
  assert((Tok.is(tok::identifier) || Tok.is(tok::coloncolon)) &&
         "Cannot be a type or scope token!");

  CXXScopeSpec SS;
  if (!ParseOptionalCXXScopeSpecifier(SS))
    return Tok.is(tok::annot_template_id);

  // Push the current token back into the token stream (or revert it if it is
  // cached) and use an annotation scope token for the current token.
  if (PP.isBacktrackEnabled())
    PP.RevertCachedTokens(1);
  else
    PP.EnterToken(Tok);
  Tok.setKind(tok::annot_cxxscope);
  Tok.setAnnotationValue(SS.getScopeRep());
  Tok.setAnnotationRange(SS.getRange());

  // In case the tokens were cached, have Preprocessor replace them with the
  // annotation token.
  PP.AnnotateCachedTokens(Tok);
  return true;
}