2006-08-11 02:43:39 +08:00
|
|
|
//===--- Parser.cpp - C Language Family Parser ----------------------------===//
|
2006-07-31 09:59:18 +08:00
|
|
|
//
|
|
|
|
// The LLVM Compiler Infrastructure
|
|
|
|
//
|
2007-12-30 03:59:25 +08:00
|
|
|
// This file is distributed under the University of Illinois Open Source
|
|
|
|
// License. See LICENSE.TXT for details.
|
2006-07-31 09:59:18 +08:00
|
|
|
//
|
|
|
|
//===----------------------------------------------------------------------===//
|
|
|
|
//
|
|
|
|
// This file implements the Parser interfaces.
|
|
|
|
//
|
|
|
|
//===----------------------------------------------------------------------===//
|
|
|
|
|
|
|
|
#include "clang/Parse/Parser.h"
|
2009-01-29 13:15:15 +08:00
|
|
|
#include "clang/Parse/ParseDiagnostic.h"
|
2006-11-12 07:03:42 +08:00
|
|
|
#include "clang/Parse/DeclSpec.h"
|
2006-08-06 06:46:42 +08:00
|
|
|
#include "clang/Parse/Scope.h"
|
2008-10-20 14:45:43 +08:00
|
|
|
#include "ExtensionRAIIObject.h"
|
2008-10-05 03:21:03 +08:00
|
|
|
#include "ParsePragma.h"
|
2006-07-31 09:59:18 +08:00
|
|
|
using namespace clang;
|
|
|
|
|
2006-11-09 14:32:27 +08:00
|
|
|
/// Construct a parser over the token stream produced by \p pp, forwarding
/// all semantic work to \p actions.
Parser::Parser(Preprocessor &pp, Action &actions)
  : PP(pp), Actions(actions), Diags(PP.getDiagnostics()),
    GreaterThanIsOperator(true) {
  // Start from a known-clean state: no token primed yet, no active scope,
  // no cached scopes, and balanced delimiter counts.
  Tok.setKind(tok::eof);
  CurScope = 0;
  NumCachedScopes = 0;
  ParenCount = BracketCount = BraceCount = 0;
  ObjCImpDecl = 0;

  // Add #pragma handlers. These are removed and destroyed in the
  // destructor.
  PackHandler =
    new PragmaPackHandler(&PP.getIdentifierTable().get("pack"), actions);
  PP.AddPragmaHandler(0, PackHandler);

  // Instantiate a LexedMethodsForTopClass for all the non-nested classes.
  PushTopClassStack();
}
|
|
|
|
|
2008-12-10 03:36:21 +08:00
|
|
|
/// Out-of-line virtual destructor to provide home for the ActionBase class
/// (anchors the vtable in this translation unit).
ActionBase::~ActionBase() {}
|
|
|
|
|
2006-08-14 08:22:04 +08:00
|
|
|
/// Out-of-line virtual destructor to provide home for the Action class
/// (anchors the vtable in this translation unit).
Action::~Action() {}
|
2006-08-14 08:15:05 +08:00
|
|
|
|
2008-12-30 11:27:21 +08:00
|
|
|
// Defined out-of-line here because of dependency on AttributeList.
// Default implementation: performs no semantic analysis and returns no decl.
Action::DeclTy *Action::ActOnUsingDirective(Scope *CurScope,
                                            SourceLocation UsingLoc,
                                            SourceLocation NamespcLoc,
                                            const CXXScopeSpec &SS,
                                            SourceLocation IdentLoc,
                                            IdentifierInfo *NamespcName,
                                            AttributeList *AttrList) {
  // FIXME: Parser seems to assume that Action::ActOn* takes ownership over
  // passed AttributeList, however other actions don't free it, is it
  // temporary state or bug?
  delete AttrList;
  return 0;
}
|
2006-07-31 09:59:18 +08:00
|
|
|
|
2008-11-22 08:59:29 +08:00
|
|
|
/// Report diagnostic \p DiagID at \p Loc, wrapping the location into a
/// FullSourceLoc so the diagnostic machinery can resolve the source line.
DiagnosticBuilder Parser::Diag(SourceLocation Loc, unsigned DiagID) {
  return Diags.Report(FullSourceLoc(Loc,PP.getSourceManager()), DiagID);
}
|
|
|
|
|
2008-11-22 08:59:29 +08:00
|
|
|
/// Convenience overload: report diagnostic \p DiagID at \p Tok's location.
DiagnosticBuilder Parser::Diag(const Token &Tok, unsigned DiagID) {
  return Diag(Tok.getLocation(), DiagID);
}
|
|
|
|
|
Introduce code modification hints into the diagnostics system. When we
know how to recover from an error, we can attach a hint to the
diagnostic that states how to modify the code, which can be one of:
- Insert some new code (a text string) at a particular source
location
- Remove the code within a given range
- Replace the code within a given range with some new code (a text
string)
Right now, we use these hints to annotate diagnostic information. For
example, if one uses the '>>' in a template argument in C++98, as in
this code:
template<int I> class B { };
B<1000 >> 2> *b1;
we'll warn that the behavior will change in C++0x. The fix is to
insert parenthese, so we use code insertion annotations to illustrate
where the parentheses go:
test.cpp:10:10: warning: use of right-shift operator ('>>') in template
argument will require parentheses in C++0x
B<1000 >> 2> *b1;
^
( )
Use of these annotations is partially implemented for HTML
diagnostics, but it's not (yet) producing valid HTML, which may be
related to PR2386, so it has been #if 0'd out.
In this future, we could consider hooking this mechanism up to the
rewriter to actually try to fix these problems during compilation (or,
after a compilation whose only errors have fixes). For now, however, I
suggest that we use these code modification hints whenever we can, so
that we get better diagnostics now and will have better coverage when
we find better ways to use this information.
This also fixes PR3410 by placing the complaint about missing tokens
just after the previous token (rather than at the location of the next
token).
llvm-svn: 65570
2009-02-27 05:00:50 +08:00
|
|
|
/// \brief Emits a diagnostic suggesting parentheses surrounding a
/// given range.
///
/// \param Loc The location where we'll emit the diagnostic.
/// \param DK The kind of diagnostic to emit.
/// \param ParenRange Source range enclosing code that should be parenthesized.
void Parser::SuggestParentheses(SourceLocation Loc, unsigned DK,
                                SourceRange ParenRange) {
  SourceLocation EndLoc = PP.getLocForEndOfToken(ParenRange.getEnd());
  if (!ParenRange.getEnd().isFileID() || EndLoc.isInvalid()) {
    // We can't display the parentheses, so just emit the
    // warning/error and return.
    Diag(Loc, DK);
    return;
  }

  // Attach insertion hints: a "(" before the range and a ")" just past its
  // last token, so clients can render (or apply) the suggested fix.
  Diag(Loc, DK)
    << CodeModificationHint::CreateInsertion(ParenRange.getBegin(), "(")
    << CodeModificationHint::CreateInsertion(EndLoc, ")");
}
|
|
|
|
|
2006-08-11 07:14:52 +08:00
|
|
|
/// MatchRHSPunctuation - For punctuation with a LHS and RHS (e.g. '['/']'),
|
|
|
|
/// this helper function matches and consumes the specified RHS token if
|
|
|
|
/// present. If not present, it emits the specified diagnostic indicating
|
|
|
|
/// that the parser failed to match the RHS of the token at LHSLoc. LHSName
|
|
|
|
/// should be the name of the unmatched LHS token.
|
2006-11-05 04:18:38 +08:00
|
|
|
SourceLocation Parser::MatchRHSPunctuation(tok::TokenKind RHSTok,
|
|
|
|
SourceLocation LHSLoc) {
|
2008-06-20 03:28:49 +08:00
|
|
|
|
2007-10-10 01:23:58 +08:00
|
|
|
if (Tok.is(RHSTok))
|
2006-11-05 04:18:38 +08:00
|
|
|
return ConsumeAnyToken();
|
2008-06-20 03:28:49 +08:00
|
|
|
|
2006-11-05 04:18:38 +08:00
|
|
|
SourceLocation R = Tok.getLocation();
|
|
|
|
const char *LHSName = "unknown";
|
|
|
|
diag::kind DID = diag::err_parse_error;
|
|
|
|
switch (RHSTok) {
|
|
|
|
default: break;
|
|
|
|
case tok::r_paren : LHSName = "("; DID = diag::err_expected_rparen; break;
|
|
|
|
case tok::r_brace : LHSName = "{"; DID = diag::err_expected_rbrace; break;
|
|
|
|
case tok::r_square: LHSName = "["; DID = diag::err_expected_rsquare; break;
|
2006-12-05 02:06:35 +08:00
|
|
|
case tok::greater: LHSName = "<"; DID = diag::err_expected_greater; break;
|
2006-08-11 07:14:52 +08:00
|
|
|
}
|
2006-11-05 04:18:38 +08:00
|
|
|
Diag(Tok, DID);
|
2008-11-24 07:17:07 +08:00
|
|
|
Diag(LHSLoc, diag::note_matching) << LHSName;
|
2006-11-05 04:18:38 +08:00
|
|
|
SkipUntil(RHSTok);
|
|
|
|
return R;
|
2006-08-11 07:14:52 +08:00
|
|
|
}
|
|
|
|
|
2006-08-13 03:26:13 +08:00
|
|
|
/// ExpectAndConsume - The parser expects that 'ExpectedTok' is next in the
|
|
|
|
/// input. If so, it is consumed and false is returned.
|
|
|
|
///
|
|
|
|
/// If the input is malformed, this emits the specified diagnostic. Next, if
|
|
|
|
/// SkipToTok is specified, it calls SkipUntil(SkipToTok). Finally, true is
|
|
|
|
/// returned.
|
|
|
|
bool Parser::ExpectAndConsume(tok::TokenKind ExpectedTok, unsigned DiagID,
|
2006-08-15 11:41:14 +08:00
|
|
|
const char *Msg, tok::TokenKind SkipToTok) {
|
2007-10-10 01:23:58 +08:00
|
|
|
if (Tok.is(ExpectedTok)) {
|
2006-08-15 12:10:31 +08:00
|
|
|
ConsumeAnyToken();
|
2006-08-13 03:26:13 +08:00
|
|
|
return false;
|
|
|
|
}
|
2008-06-20 03:28:49 +08:00
|
|
|
|
Introduce code modification hints into the diagnostics system. When we
know how to recover from an error, we can attach a hint to the
diagnostic that states how to modify the code, which can be one of:
- Insert some new code (a text string) at a particular source
location
- Remove the code within a given range
- Replace the code within a given range with some new code (a text
string)
Right now, we use these hints to annotate diagnostic information. For
example, if one uses the '>>' in a template argument in C++98, as in
this code:
template<int I> class B { };
B<1000 >> 2> *b1;
we'll warn that the behavior will change in C++0x. The fix is to
insert parenthese, so we use code insertion annotations to illustrate
where the parentheses go:
test.cpp:10:10: warning: use of right-shift operator ('>>') in template
argument will require parentheses in C++0x
B<1000 >> 2> *b1;
^
( )
Use of these annotations is partially implemented for HTML
diagnostics, but it's not (yet) producing valid HTML, which may be
related to PR2386, so it has been #if 0'd out.
In this future, we could consider hooking this mechanism up to the
rewriter to actually try to fix these problems during compilation (or,
after a compilation whose only errors have fixes). For now, however, I
suggest that we use these code modification hints whenever we can, so
that we get better diagnostics now and will have better coverage when
we find better ways to use this information.
This also fixes PR3410 by placing the complaint about missing tokens
just after the previous token (rather than at the location of the next
token).
llvm-svn: 65570
2009-02-27 05:00:50 +08:00
|
|
|
const char *Spelling = 0;
|
2009-02-28 01:53:17 +08:00
|
|
|
SourceLocation EndLoc = PP.getLocForEndOfToken(PrevTokLocation);
|
|
|
|
if (EndLoc.isValid() &&
|
|
|
|
(Spelling = tok::getTokenSimpleSpelling(ExpectedTok))) {
|
Introduce code modification hints into the diagnostics system. When we
know how to recover from an error, we can attach a hint to the
diagnostic that states how to modify the code, which can be one of:
- Insert some new code (a text string) at a particular source
location
- Remove the code within a given range
- Replace the code within a given range with some new code (a text
string)
Right now, we use these hints to annotate diagnostic information. For
example, if one uses the '>>' in a template argument in C++98, as in
this code:
template<int I> class B { };
B<1000 >> 2> *b1;
we'll warn that the behavior will change in C++0x. The fix is to
insert parenthese, so we use code insertion annotations to illustrate
where the parentheses go:
test.cpp:10:10: warning: use of right-shift operator ('>>') in template
argument will require parentheses in C++0x
B<1000 >> 2> *b1;
^
( )
Use of these annotations is partially implemented for HTML
diagnostics, but it's not (yet) producing valid HTML, which may be
related to PR2386, so it has been #if 0'd out.
In this future, we could consider hooking this mechanism up to the
rewriter to actually try to fix these problems during compilation (or,
after a compilation whose only errors have fixes). For now, however, I
suggest that we use these code modification hints whenever we can, so
that we get better diagnostics now and will have better coverage when
we find better ways to use this information.
This also fixes PR3410 by placing the complaint about missing tokens
just after the previous token (rather than at the location of the next
token).
llvm-svn: 65570
2009-02-27 05:00:50 +08:00
|
|
|
// Show what code to insert to fix this problem.
|
2009-02-28 01:53:17 +08:00
|
|
|
Diag(EndLoc, DiagID)
|
Introduce code modification hints into the diagnostics system. When we
know how to recover from an error, we can attach a hint to the
diagnostic that states how to modify the code, which can be one of:
- Insert some new code (a text string) at a particular source
location
- Remove the code within a given range
- Replace the code within a given range with some new code (a text
string)
Right now, we use these hints to annotate diagnostic information. For
example, if one uses the '>>' in a template argument in C++98, as in
this code:
template<int I> class B { };
B<1000 >> 2> *b1;
we'll warn that the behavior will change in C++0x. The fix is to
insert parenthese, so we use code insertion annotations to illustrate
where the parentheses go:
test.cpp:10:10: warning: use of right-shift operator ('>>') in template
argument will require parentheses in C++0x
B<1000 >> 2> *b1;
^
( )
Use of these annotations is partially implemented for HTML
diagnostics, but it's not (yet) producing valid HTML, which may be
related to PR2386, so it has been #if 0'd out.
In this future, we could consider hooking this mechanism up to the
rewriter to actually try to fix these problems during compilation (or,
after a compilation whose only errors have fixes). For now, however, I
suggest that we use these code modification hints whenever we can, so
that we get better diagnostics now and will have better coverage when
we find better ways to use this information.
This also fixes PR3410 by placing the complaint about missing tokens
just after the previous token (rather than at the location of the next
token).
llvm-svn: 65570
2009-02-27 05:00:50 +08:00
|
|
|
<< Msg
|
2009-02-28 01:53:17 +08:00
|
|
|
<< CodeModificationHint::CreateInsertion(EndLoc, Spelling);
|
Introduce code modification hints into the diagnostics system. When we
know how to recover from an error, we can attach a hint to the
diagnostic that states how to modify the code, which can be one of:
- Insert some new code (a text string) at a particular source
location
- Remove the code within a given range
- Replace the code within a given range with some new code (a text
string)
Right now, we use these hints to annotate diagnostic information. For
example, if one uses the '>>' in a template argument in C++98, as in
this code:
template<int I> class B { };
B<1000 >> 2> *b1;
we'll warn that the behavior will change in C++0x. The fix is to
insert parenthese, so we use code insertion annotations to illustrate
where the parentheses go:
test.cpp:10:10: warning: use of right-shift operator ('>>') in template
argument will require parentheses in C++0x
B<1000 >> 2> *b1;
^
( )
Use of these annotations is partially implemented for HTML
diagnostics, but it's not (yet) producing valid HTML, which may be
related to PR2386, so it has been #if 0'd out.
In this future, we could consider hooking this mechanism up to the
rewriter to actually try to fix these problems during compilation (or,
after a compilation whose only errors have fixes). For now, however, I
suggest that we use these code modification hints whenever we can, so
that we get better diagnostics now and will have better coverage when
we find better ways to use this information.
This also fixes PR3410 by placing the complaint about missing tokens
just after the previous token (rather than at the location of the next
token).
llvm-svn: 65570
2009-02-27 05:00:50 +08:00
|
|
|
} else
|
|
|
|
Diag(Tok, DiagID) << Msg;
|
|
|
|
|
2006-08-13 03:26:13 +08:00
|
|
|
if (SkipToTok != tok::unknown)
|
|
|
|
SkipUntil(SkipToTok);
|
|
|
|
return true;
|
|
|
|
}
|
|
|
|
|
2006-08-07 05:55:29 +08:00
|
|
|
//===----------------------------------------------------------------------===//
|
|
|
|
// Error recovery.
|
|
|
|
//===----------------------------------------------------------------------===//
|
|
|
|
|
|
|
|
/// SkipUntil - Read tokens until we get to the specified token, then consume
/// it (unless DontConsume is true).  Because we cannot guarantee that the
/// token will ever occur, this skips to the next token, or to some likely
/// good stopping point.  If StopAtSemi is true, skipping will stop at a ';'
/// character.
///
/// If SkipUntil finds the specified token, it returns true, otherwise it
/// returns false.
bool Parser::SkipUntil(const tok::TokenKind *Toks, unsigned NumToks,
                       bool StopAtSemi, bool DontConsume) {
  // We always want this function to skip at least one token if the first token
  // isn't T and if not at EOF.
  bool isFirstTokenSkipped = true;
  while (1) {
    // If we found one of the tokens, stop and return true.
    for (unsigned i = 0; i != NumToks; ++i) {
      if (Tok.is(Toks[i])) {
        if (DontConsume) {
          // Noop, don't consume the token.
        } else {
          ConsumeAnyToken();
        }
        return true;
      }
    }

    switch (Tok.getKind()) {
    case tok::eof:
      // Ran out of tokens.
      return false;

    case tok::l_paren:
      // Recursively skip properly-nested parens.
      ConsumeParen();
      SkipUntil(tok::r_paren, false);
      break;
    case tok::l_square:
      // Recursively skip properly-nested square brackets.
      ConsumeBracket();
      SkipUntil(tok::r_square, false);
      break;
    case tok::l_brace:
      // Recursively skip properly-nested braces.
      ConsumeBrace();
      SkipUntil(tok::r_brace, false);
      break;

    // Okay, we found a ']' or '}' or ')', which we think should be balanced.
    // Since the user wasn't looking for this token (if they were, it would
    // already be handled), this isn't balanced.  If there is a LHS token at a
    // higher level, we will assume that this matches the unbalanced token
    // and return it.  Otherwise, this is a spurious RHS token, which we skip.
    case tok::r_paren:
      // Note: the first-token exemption lets a spurious leading closer be
      // skipped rather than treated as matching an outer opener.
      if (ParenCount && !isFirstTokenSkipped)
        return false;  // Matches something.
      ConsumeParen();
      break;
    case tok::r_square:
      if (BracketCount && !isFirstTokenSkipped)
        return false;  // Matches something.
      ConsumeBracket();
      break;
    case tok::r_brace:
      if (BraceCount && !isFirstTokenSkipped)
        return false;  // Matches something.
      ConsumeBrace();
      break;

    case tok::string_literal:
    case tok::wide_string_literal:
      // String tokens need their own consume entry point.
      ConsumeStringToken();
      break;
    case tok::semi:
      if (StopAtSemi)
        return false;
      // FALL THROUGH.
    default:
      // Skip this token.
      ConsumeToken();
      break;
    }
    isFirstTokenSkipped = false;
  }
}
|
|
|
|
|
2006-08-14 08:15:05 +08:00
|
|
|
//===----------------------------------------------------------------------===//
|
|
|
|
// Scope manipulation
|
|
|
|
//===----------------------------------------------------------------------===//
|
|
|
|
|
|
|
|
/// EnterScope - Start a new scope.
|
2006-11-06 07:47:55 +08:00
|
|
|
void Parser::EnterScope(unsigned ScopeFlags) {
|
2007-07-15 08:04:39 +08:00
|
|
|
if (NumCachedScopes) {
|
|
|
|
Scope *N = ScopeCache[--NumCachedScopes];
|
2006-11-06 08:22:42 +08:00
|
|
|
N->Init(CurScope, ScopeFlags);
|
|
|
|
CurScope = N;
|
|
|
|
} else {
|
|
|
|
CurScope = new Scope(CurScope, ScopeFlags);
|
|
|
|
}
|
2006-08-14 08:15:05 +08:00
|
|
|
}
|
|
|
|
|
|
|
|
/// ExitScope - Pop a scope off the scope stack.
|
|
|
|
void Parser::ExitScope() {
|
|
|
|
assert(CurScope && "Scope imbalance!");
|
|
|
|
|
2007-10-10 04:37:18 +08:00
|
|
|
// Inform the actions module that this scope is going away if there are any
|
|
|
|
// decls in it.
|
|
|
|
if (!CurScope->decl_empty())
|
2007-10-10 06:01:59 +08:00
|
|
|
Actions.ActOnPopScope(Tok.getLocation(), CurScope);
|
2008-06-20 03:28:49 +08:00
|
|
|
|
2007-07-15 08:04:39 +08:00
|
|
|
Scope *OldScope = CurScope;
|
|
|
|
CurScope = OldScope->getParent();
|
2008-06-20 03:28:49 +08:00
|
|
|
|
2007-07-15 08:04:39 +08:00
|
|
|
if (NumCachedScopes == ScopeCacheSize)
|
|
|
|
delete OldScope;
|
2006-11-06 08:22:42 +08:00
|
|
|
else
|
2007-07-15 08:04:39 +08:00
|
|
|
ScopeCache[NumCachedScopes++] = OldScope;
|
2006-08-14 08:15:05 +08:00
|
|
|
}
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
2006-07-31 13:09:04 +08:00
|
|
|
//===----------------------------------------------------------------------===//
|
|
|
|
// C99 6.9: External Definitions.
|
|
|
|
//===----------------------------------------------------------------------===//
|
2006-07-31 09:59:18 +08:00
|
|
|
|
2006-11-06 08:22:42 +08:00
|
|
|
/// Tear down the parser: releases any remaining scopes and unregisters the
/// pragma handlers installed by the constructor.
Parser::~Parser() {
  // If we still have scopes active, delete the scope tree.
  delete CurScope;

  // Free the scope cache.
  for (unsigned i = 0, e = NumCachedScopes; i != e; ++i)
    delete ScopeCache[i];

  // Remove the pragma handlers we installed.
  PP.RemovePragmaHandler(0, PackHandler);
  delete PackHandler;
}
|
|
|
|
|
2006-08-17 15:04:37 +08:00
|
|
|
/// Initialize - Warm up the parser.
///
void Parser::Initialize() {
  // Prime the lexer look-ahead.
  ConsumeToken();

  // Create the translation unit scope.  Install it as the current scope.
  assert(CurScope == 0 && "A scope is already active?");
  EnterScope(Scope::DeclScope);
  Actions.ActOnTranslationUnitScope(Tok.getLocation(), CurScope);

  if (Tok.is(tok::eof) &&
      !getLang().CPlusPlus)  // Empty source file is an extension in C
    Diag(Tok, diag::ext_empty_source_file);

  // Initialization for Objective-C context sensitive keywords recognition.
  // Referenced in Parser::ParseObjCTypeQualifierList.
  if (getLang().ObjC1) {
    ObjCTypeQuals[objc_in] = &PP.getIdentifierTable().get("in");
    ObjCTypeQuals[objc_out] = &PP.getIdentifierTable().get("out");
    ObjCTypeQuals[objc_inout] = &PP.getIdentifierTable().get("inout");
    ObjCTypeQuals[objc_oneway] = &PP.getIdentifierTable().get("oneway");
    ObjCTypeQuals[objc_bycopy] = &PP.getIdentifierTable().get("bycopy");
    ObjCTypeQuals[objc_byref] = &PP.getIdentifierTable().get("byref");
  }

  // Cache the identifier info for "super" so later checks can compare
  // pointers instead of spellings.
  Ident_super = &PP.getIdentifierTable().get("super");
}
|
|
|
|
|
|
|
|
/// ParseTopLevelDecl - Parse one top-level declaration, return whatever the
|
|
|
|
/// action tells us to. This returns true if the EOF was encountered.
|
2007-11-30 07:05:20 +08:00
|
|
|
bool Parser::ParseTopLevelDecl(DeclTy*& Result) {
|
|
|
|
Result = 0;
|
2008-08-23 11:19:52 +08:00
|
|
|
if (Tok.is(tok::eof)) {
|
|
|
|
Actions.ActOnEndOfTranslationUnit();
|
|
|
|
return true;
|
|
|
|
}
|
2008-06-20 03:28:49 +08:00
|
|
|
|
2007-11-30 07:05:20 +08:00
|
|
|
Result = ParseExternalDeclaration();
|
2006-08-17 15:04:37 +08:00
|
|
|
return false;
|
|
|
|
}
|
|
|
|
|
|
|
|
/// ParseTranslationUnit:
|
|
|
|
/// translation-unit: [C99 6.9]
|
2008-06-20 03:28:49 +08:00
|
|
|
/// external-declaration
|
|
|
|
/// translation-unit external-declaration
|
2006-08-17 15:04:37 +08:00
|
|
|
void Parser::ParseTranslationUnit() {
|
2008-12-10 14:34:36 +08:00
|
|
|
Initialize();
|
2008-06-20 03:28:49 +08:00
|
|
|
|
2007-11-30 07:05:20 +08:00
|
|
|
DeclTy *Res;
|
|
|
|
while (!ParseTopLevelDecl(Res))
|
2006-08-17 15:04:37 +08:00
|
|
|
/*parse them all*/;
|
2008-08-23 10:00:52 +08:00
|
|
|
|
|
|
|
ExitScope();
|
|
|
|
assert(CurScope == 0 && "Scope imbalance!");
|
2006-08-17 15:04:37 +08:00
|
|
|
}
|
|
|
|
|
2006-07-31 09:59:18 +08:00
|
|
|
/// ParseExternalDeclaration:
///
/// external-declaration: [C99 6.9], declaration: [C++ dcl.dcl]
///         function-definition
///         declaration
/// [EXT]   ';'
/// [GNU]   asm-definition
/// [GNU]   __extension__ external-declaration
/// [OBJC]  objc-class-definition
/// [OBJC]  objc-class-declaration
/// [OBJC]  objc-alias-declaration
/// [OBJC]  objc-protocol-definition
/// [OBJC]  objc-method-definition
/// [OBJC]  @end
/// [C++]   linkage-specification
/// [GNU] asm-definition:
///         simple-asm-expr ';'
///
Parser::DeclTy *Parser::ParseExternalDeclaration() {
  switch (Tok.getKind()) {
  case tok::semi:
    Diag(Tok, diag::ext_top_level_semi);
    ConsumeToken();
    // TODO: Invoke action for top-level semicolon.
    return 0;
  case tok::r_brace:
    // A stray '}' at the top level: diagnose and eat it to make progress.
    Diag(Tok, diag::err_expected_external_declaration);
    ConsumeBrace();
    return 0;
  case tok::eof:
    Diag(Tok, diag::err_expected_external_declaration);
    return 0;
  case tok::kw___extension__: {
    // __extension__ silences extension warnings in the subexpression.
    ExtensionRAIIObject O(Diags);  // Use RAII to do this.
    ConsumeToken();
    return ParseExternalDeclaration();
  }
  case tok::kw_asm: {
    OwningExprResult Result(ParseSimpleAsm());

    ExpectAndConsume(tok::semi, diag::err_expected_semi_after,
                     "top-level asm block");

    if (!Result.isInvalid())
      return Actions.ActOnFileScopeAsmDecl(Tok.getLocation(), move(Result));
    return 0;
  }
  case tok::at:
    // @ is not a legal token unless objc is enabled, no need to check.
    return ParseObjCAtDirectives();
  case tok::minus:
  case tok::plus:
    // '-'/'+' introduce Objective-C method definitions; in other language
    // modes they cannot begin an external declaration.
    if (getLang().ObjC1)
      return ParseObjCMethodDefinition();
    else {
      Diag(Tok, diag::err_expected_external_declaration);
      ConsumeToken();
    }
    return 0;
  case tok::kw_using:
  case tok::kw_namespace:
  case tok::kw_typedef:
  case tok::kw_template:
  case tok::kw_export:    // As in 'export template'
    // A function definition cannot start with any of these keywords.
    return ParseDeclaration(Declarator::FileContext);
  default:
    // We can't tell whether this is a function-definition or declaration yet.
    return ParseDeclarationOrFunctionDefinition();
  }
}
|
|
|
|
|
|
|
|
/// ParseDeclarationOrFunctionDefinition - Parse either a function-definition or
/// a declaration.  We can't tell which we have until we read up to the
/// compound-statement in function-definition. TemplateParams, if
/// non-NULL, provides the template parameters when we're parsing a
/// C++ template-declaration.
///
///       function-definition: [C99 6.9.1]
///         decl-specs      declarator declaration-list[opt] compound-statement
/// [C90] function-definition: [C99 6.7.1] - implicit int result
/// [C90]   decl-specs[opt] declarator declaration-list[opt] compound-statement
///
///       declaration: [C99 6.7]
///         declaration-specifiers init-declarator-list[opt] ';'
/// [!C99] init-declarator-list ';'   [TODO: warn in c99 mode]
/// [OMP]  threadprivate-directive [TODO]
///
/// Returns the parsed declaration, or 0 when nothing was produced (either
/// because of a parse error or because the construct declares nothing).
Parser::DeclTy *
Parser::ParseDeclarationOrFunctionDefinition(
                                  TemplateParameterLists *TemplateParams) {
  // Parse the common declaration-specifiers piece.
  DeclSpec DS;
  ParseDeclarationSpecifiers(DS, TemplateParams);

  // C99 6.7.2.3p6: Handle "struct-or-union identifier;", "enum { X };"
  // declaration-specifiers init-declarator-list[opt] ';'
  if (Tok.is(tok::semi)) {
    ConsumeToken();
    // No declarator at all: let Sema decide what (if anything) the bare
    // decl-specs declare (e.g. a tag).
    return Actions.ParsedFreeStandingDeclSpec(CurScope, DS);
  }

  // ObjC2 allows prefix attributes on class interfaces and protocols.
  // FIXME: This still needs better diagnostics. We should only accept
  // attributes here, no types, etc.
  if (getLang().ObjC2 && Tok.is(tok::at)) {
    SourceLocation AtLoc = ConsumeToken(); // the "@"
    if (!Tok.isObjCAtKeyword(tok::objc_interface) &&
        !Tok.isObjCAtKeyword(tok::objc_protocol)) {
      Diag(Tok, diag::err_objc_unexpected_attr);
      SkipUntil(tok::semi); // FIXME: better skip?
      return 0;
    }
    // Reject decl-specs other than attributes before the '@' by trying to
    // set an (unspecified) type and diagnosing any conflict.
    const char *PrevSpec = 0;
    if (DS.SetTypeSpecType(DeclSpec::TST_unspecified, AtLoc, PrevSpec))
      Diag(AtLoc, diag::err_invalid_decl_spec_combination) << PrevSpec;
    if (Tok.isObjCAtKeyword(tok::objc_protocol))
      return ParseObjCAtProtocolDeclaration(AtLoc, DS.getAttributes());
    return ParseObjCAtInterfaceDeclaration(AtLoc, DS.getAttributes());
  }

  // If the declspec consisted only of 'extern' and we have a string
  // literal following it, this must be a C++ linkage specifier like
  // 'extern "C"'.
  if (Tok.is(tok::string_literal) && getLang().CPlusPlus &&
      DS.getStorageClassSpec() == DeclSpec::SCS_extern &&
      DS.getParsedSpecifiers() == DeclSpec::PQ_StorageClassSpecifier)
    return ParseLinkage(Declarator::FileContext);

  // Parse the first declarator.
  Declarator DeclaratorInfo(DS, Declarator::FileContext);
  ParseDeclarator(DeclaratorInfo);
  // Error parsing the declarator?
  if (!DeclaratorInfo.hasName()) {
    // If so, skip until the semi-colon or a }.
    SkipUntil(tok::r_brace, true, true);
    if (Tok.is(tok::semi))
      ConsumeToken();
    return 0;
  }

  // If the declarator is the start of a function definition, handle it.
  if (Tok.is(tok::equal) ||  // int X()=  -> not a function def
      Tok.is(tok::comma) ||  // int X(),  -> not a function def
      Tok.is(tok::semi)  ||  // int X();  -> not a function def
      Tok.is(tok::kw_asm) || // int X() __asm__ -> not a function def
      Tok.is(tok::kw___attribute) || // int X() __attr__ -> not a function def
      (getLang().CPlusPlus &&
       Tok.is(tok::l_paren)) ) { // int X(0) -> not a function def [C++]
    // FALL THROUGH to the init-declarator-list parse at the bottom.
  } else if (DeclaratorInfo.isFunctionDeclarator() &&
             (Tok.is(tok::l_brace) ||             // int X() {}
              (!getLang().CPlusPlus &&
               isDeclarationSpecifier()))) {      // int X(f) int f; {}
    if (DS.getStorageClassSpec() == DeclSpec::SCS_typedef) {
      Diag(Tok, diag::err_function_declared_typedef);

      if (Tok.is(tok::l_brace)) {
        // This recovery skips the entire function body. It would be nice
        // to simply call ParseFunctionDefinition() below, however Sema
        // assumes the declarator represents a function, not a typedef.
        ConsumeBrace();
        SkipUntil(tok::r_brace, true);
      } else {
        SkipUntil(tok::semi);
      }
      return 0;
    }
    return ParseFunctionDefinition(DeclaratorInfo);
  } else {
    // Neither a function definition nor a plausible declaration continuation.
    if (DeclaratorInfo.isFunctionDeclarator())
      Diag(Tok, diag::err_expected_fn_body);
    else
      Diag(Tok, diag::err_invalid_token_after_toplevel_declarator);
    SkipUntil(tok::semi);
    return 0;
  }

  // Parse the init-declarator-list for a normal declaration.
  return ParseInitDeclaratorListAfterFirstDeclarator(DeclaratorInfo);
}
|
|
|
|
|
2006-08-07 14:31:38 +08:00
|
|
|
/// ParseFunctionDefinition - We parsed and verified that the specified
|
|
|
|
/// Declarator is well formed. If this is a K&R-style function, read the
|
|
|
|
/// parameters declaration-list, then start the compound-statement.
|
|
|
|
///
|
2008-04-05 13:52:15 +08:00
|
|
|
/// function-definition: [C99 6.9.1]
|
|
|
|
/// decl-specs declarator declaration-list[opt] compound-statement
|
|
|
|
/// [C90] function-definition: [C99 6.7.1] - implicit int result
|
2008-06-20 03:28:49 +08:00
|
|
|
/// [C90] decl-specs[opt] declarator declaration-list[opt] compound-statement
|
2008-11-05 12:29:56 +08:00
|
|
|
/// [C++] function-definition: [C++ 8.4]
|
|
|
|
/// decl-specifier-seq[opt] declarator ctor-initializer[opt] function-body
|
|
|
|
/// [C++] function-definition: [C++ 8.4]
|
|
|
|
/// decl-specifier-seq[opt] declarator function-try-block [TODO]
|
2006-08-07 14:31:38 +08:00
|
|
|
///
|
2006-10-16 08:33:54 +08:00
|
|
|
Parser::DeclTy *Parser::ParseFunctionDefinition(Declarator &D) {
|
2006-12-02 14:43:02 +08:00
|
|
|
const DeclaratorChunk &FnTypeInfo = D.getTypeObject(0);
|
|
|
|
assert(FnTypeInfo.Kind == DeclaratorChunk::Function &&
|
2006-08-07 14:31:38 +08:00
|
|
|
"This isn't a function declarator!");
|
2006-12-03 16:41:30 +08:00
|
|
|
const DeclaratorChunk::FunctionTypeInfo &FTI = FnTypeInfo.Fun;
|
2008-06-20 03:28:49 +08:00
|
|
|
|
2008-04-05 13:52:15 +08:00
|
|
|
// If this is C90 and the declspecs were completely missing, fudge in an
|
|
|
|
// implicit int. We do this here because this is the only place where
|
|
|
|
// declaration-specifiers are completely optional in the grammar.
|
2009-02-28 02:35:46 +08:00
|
|
|
if (getLang().ImplicitInt && D.getDeclSpec().isEmpty()) {
|
2008-04-05 13:52:15 +08:00
|
|
|
const char *PrevSpec;
|
2008-10-20 10:01:34 +08:00
|
|
|
D.getMutableDeclSpec().SetTypeSpecType(DeclSpec::TST_int,
|
|
|
|
D.getIdentifierLoc(),
|
|
|
|
PrevSpec);
|
2009-02-10 02:23:29 +08:00
|
|
|
D.SetRangeBegin(D.getDeclSpec().getSourceRange().getBegin());
|
2008-04-05 13:52:15 +08:00
|
|
|
}
|
2008-06-20 03:28:49 +08:00
|
|
|
|
2006-08-07 14:31:38 +08:00
|
|
|
// If this declaration was formed with a K&R-style identifier list for the
|
|
|
|
// arguments, parse declarations for all of the args next.
|
|
|
|
// int foo(a,b) int a; float b; {}
|
2006-12-03 16:41:30 +08:00
|
|
|
if (!FTI.hasPrototype && FTI.NumArgs != 0)
|
|
|
|
ParseKNRParamDeclarations(D);
|
2006-08-07 14:31:38 +08:00
|
|
|
|
2008-11-05 12:29:56 +08:00
|
|
|
// We should have either an opening brace or, in a C++ constructor,
|
|
|
|
// we may have a colon.
|
2008-11-25 05:45:59 +08:00
|
|
|
// FIXME: In C++, we might also find the 'try' keyword.
|
2008-11-05 12:29:56 +08:00
|
|
|
if (Tok.isNot(tok::l_brace) && Tok.isNot(tok::colon)) {
|
2006-08-09 13:47:47 +08:00
|
|
|
Diag(Tok, diag::err_expected_fn_body);
|
|
|
|
|
|
|
|
// Skip over garbage, until we get to '{'. Don't eat the '{'.
|
|
|
|
SkipUntil(tok::l_brace, true, true);
|
2008-06-20 03:28:49 +08:00
|
|
|
|
2006-08-09 13:47:47 +08:00
|
|
|
// If we didn't find the '{', bail out.
|
2007-10-10 01:23:58 +08:00
|
|
|
if (Tok.isNot(tok::l_brace))
|
2006-10-16 08:33:54 +08:00
|
|
|
return 0;
|
2006-08-09 13:47:47 +08:00
|
|
|
}
|
2008-06-20 03:28:49 +08:00
|
|
|
|
2007-10-10 01:14:05 +08:00
|
|
|
// Enter a scope for the function body.
|
2008-12-10 14:34:36 +08:00
|
|
|
ParseScope BodyScope(this, Scope::FnScope|Scope::DeclScope);
|
2008-06-20 03:28:49 +08:00
|
|
|
|
2007-10-10 01:14:05 +08:00
|
|
|
// Tell the actions module that we have entered a function definition with the
|
|
|
|
// specified Declarator for the function.
|
|
|
|
DeclTy *Res = Actions.ActOnStartOfFunctionDef(CurScope, D);
|
2008-06-20 03:28:49 +08:00
|
|
|
|
2008-11-05 12:29:56 +08:00
|
|
|
// If we have a colon, then we're probably parsing a C++
|
|
|
|
// ctor-initializer.
|
|
|
|
if (Tok.is(tok::colon))
|
|
|
|
ParseConstructorInitializer(Res);
|
|
|
|
|
|
|
|
SourceLocation BraceLoc = Tok.getLocation();
|
2008-06-20 03:28:49 +08:00
|
|
|
return ParseFunctionStatementBody(Res, BraceLoc, BraceLoc);
|
2006-08-07 14:31:38 +08:00
|
|
|
}
|
|
|
|
|
2006-12-03 16:41:30 +08:00
|
|
|
/// ParseKNRParamDeclarations - Parse 'declaration-list[opt]' which provides
|
|
|
|
/// types for a function with a K&R-style identifier list for arguments.
|
|
|
|
void Parser::ParseKNRParamDeclarations(Declarator &D) {
|
|
|
|
// We know that the top-level of this declarator is a function.
|
|
|
|
DeclaratorChunk::FunctionTypeInfo &FTI = D.getTypeObject(0).Fun;
|
|
|
|
|
2008-04-08 12:40:51 +08:00
|
|
|
// Enter function-declaration scope, limiting any declarators to the
|
|
|
|
// function prototype scope, including parameter declarators.
|
2009-01-10 06:42:13 +08:00
|
|
|
ParseScope PrototypeScope(this, Scope::FunctionPrototypeScope|Scope::DeclScope);
|
2008-04-08 12:40:51 +08:00
|
|
|
|
2006-12-03 16:41:30 +08:00
|
|
|
// Read all the argument declarations.
|
|
|
|
while (isDeclarationSpecifier()) {
|
|
|
|
SourceLocation DSStart = Tok.getLocation();
|
2008-06-20 03:28:49 +08:00
|
|
|
|
2006-12-03 16:41:30 +08:00
|
|
|
// Parse the common declaration-specifiers piece.
|
|
|
|
DeclSpec DS;
|
|
|
|
ParseDeclarationSpecifiers(DS);
|
2008-06-20 03:28:49 +08:00
|
|
|
|
2006-12-03 16:41:30 +08:00
|
|
|
// C99 6.9.1p6: 'each declaration in the declaration list shall have at
|
|
|
|
// least one declarator'.
|
|
|
|
// NOTE: GCC just makes this an ext-warn. It's not clear what it does with
|
|
|
|
// the declarations though. It's trivial to ignore them, really hard to do
|
|
|
|
// anything else with them.
|
2007-10-10 01:23:58 +08:00
|
|
|
if (Tok.is(tok::semi)) {
|
2006-12-03 16:41:30 +08:00
|
|
|
Diag(DSStart, diag::err_declaration_does_not_declare_param);
|
|
|
|
ConsumeToken();
|
|
|
|
continue;
|
|
|
|
}
|
2008-06-20 03:28:49 +08:00
|
|
|
|
2006-12-03 16:41:30 +08:00
|
|
|
// C99 6.9.1p6: Declarations shall contain no storage-class specifiers other
|
|
|
|
// than register.
|
|
|
|
if (DS.getStorageClassSpec() != DeclSpec::SCS_unspecified &&
|
|
|
|
DS.getStorageClassSpec() != DeclSpec::SCS_register) {
|
|
|
|
Diag(DS.getStorageClassSpecLoc(),
|
|
|
|
diag::err_invalid_storage_class_in_func_decl);
|
|
|
|
DS.ClearStorageClassSpecs();
|
|
|
|
}
|
|
|
|
if (DS.isThreadSpecified()) {
|
|
|
|
Diag(DS.getThreadSpecLoc(),
|
|
|
|
diag::err_invalid_storage_class_in_func_decl);
|
|
|
|
DS.ClearStorageClassSpecs();
|
|
|
|
}
|
2008-06-20 03:28:49 +08:00
|
|
|
|
2006-12-03 16:41:30 +08:00
|
|
|
// Parse the first declarator attached to this declspec.
|
|
|
|
Declarator ParmDeclarator(DS, Declarator::KNRTypeListContext);
|
|
|
|
ParseDeclarator(ParmDeclarator);
|
|
|
|
|
|
|
|
// Handle the full declarator list.
|
|
|
|
while (1) {
|
2007-06-02 01:11:19 +08:00
|
|
|
DeclTy *AttrList;
|
2006-12-03 16:41:30 +08:00
|
|
|
// If attributes are present, parse them.
|
2007-10-10 01:23:58 +08:00
|
|
|
if (Tok.is(tok::kw___attribute))
|
2006-12-03 16:41:30 +08:00
|
|
|
// FIXME: attach attributes too.
|
2007-06-02 01:11:19 +08:00
|
|
|
AttrList = ParseAttributes();
|
2008-06-20 03:28:49 +08:00
|
|
|
|
2006-12-03 16:41:30 +08:00
|
|
|
// Ask the actions module to compute the type for this declarator.
|
2008-06-20 03:28:49 +08:00
|
|
|
Action::DeclTy *Param =
|
2008-04-08 12:40:51 +08:00
|
|
|
Actions.ActOnParamDeclarator(CurScope, ParmDeclarator);
|
2007-09-11 04:51:04 +08:00
|
|
|
|
2008-06-20 03:28:49 +08:00
|
|
|
if (Param &&
|
2006-12-03 16:41:30 +08:00
|
|
|
// A missing identifier has already been diagnosed.
|
|
|
|
ParmDeclarator.getIdentifier()) {
|
|
|
|
|
|
|
|
// Scan the argument list looking for the correct param to apply this
|
|
|
|
// type.
|
|
|
|
for (unsigned i = 0; ; ++i) {
|
|
|
|
// C99 6.9.1p6: those declarators shall declare only identifiers from
|
|
|
|
// the identifier list.
|
|
|
|
if (i == FTI.NumArgs) {
|
2008-11-18 15:48:38 +08:00
|
|
|
Diag(ParmDeclarator.getIdentifierLoc(), diag::err_no_matching_param)
|
2008-11-19 15:51:13 +08:00
|
|
|
<< ParmDeclarator.getIdentifier();
|
2006-12-03 16:41:30 +08:00
|
|
|
break;
|
|
|
|
}
|
2008-06-20 03:28:49 +08:00
|
|
|
|
2006-12-03 16:41:30 +08:00
|
|
|
if (FTI.ArgInfo[i].Ident == ParmDeclarator.getIdentifier()) {
|
|
|
|
// Reject redefinitions of parameters.
|
2008-04-08 12:40:51 +08:00
|
|
|
if (FTI.ArgInfo[i].Param) {
|
2006-12-03 16:41:30 +08:00
|
|
|
Diag(ParmDeclarator.getIdentifierLoc(),
|
2008-11-18 15:48:38 +08:00
|
|
|
diag::err_param_redefinition)
|
2008-11-19 15:51:13 +08:00
|
|
|
<< ParmDeclarator.getIdentifier();
|
2006-12-03 16:41:30 +08:00
|
|
|
} else {
|
2008-04-08 12:40:51 +08:00
|
|
|
FTI.ArgInfo[i].Param = Param;
|
2006-12-03 16:41:30 +08:00
|
|
|
}
|
|
|
|
break;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
// If we don't have a comma, it is either the end of the list (a ';') or
|
|
|
|
// an error, bail out.
|
2007-10-10 01:23:58 +08:00
|
|
|
if (Tok.isNot(tok::comma))
|
2006-12-03 16:41:30 +08:00
|
|
|
break;
|
2008-06-20 03:28:49 +08:00
|
|
|
|
2006-12-03 16:41:30 +08:00
|
|
|
// Consume the comma.
|
|
|
|
ConsumeToken();
|
2008-06-20 03:28:49 +08:00
|
|
|
|
2006-12-03 16:41:30 +08:00
|
|
|
// Parse the next declarator.
|
|
|
|
ParmDeclarator.clear();
|
|
|
|
ParseDeclarator(ParmDeclarator);
|
|
|
|
}
|
2008-06-20 03:28:49 +08:00
|
|
|
|
2007-10-10 01:23:58 +08:00
|
|
|
if (Tok.is(tok::semi)) {
|
2006-12-03 16:41:30 +08:00
|
|
|
ConsumeToken();
|
|
|
|
} else {
|
|
|
|
Diag(Tok, diag::err_parse_error);
|
|
|
|
// Skip to end of block or statement
|
|
|
|
SkipUntil(tok::semi, true);
|
2007-10-10 01:23:58 +08:00
|
|
|
if (Tok.is(tok::semi))
|
2006-12-03 16:41:30 +08:00
|
|
|
ConsumeToken();
|
|
|
|
}
|
|
|
|
}
|
2008-06-20 03:28:49 +08:00
|
|
|
|
2006-12-03 16:41:30 +08:00
|
|
|
// The actions module must verify that all arguments were declared.
|
2009-01-24 00:23:13 +08:00
|
|
|
Actions.ActOnFinishKNRParamDeclarations(CurScope, D);
|
2006-12-03 16:41:30 +08:00
|
|
|
}
|
|
|
|
|
|
|
|
|
2006-08-15 14:03:28 +08:00
|
|
|
/// ParseAsmStringLiteral - This is just a normal string-literal, but is not
|
|
|
|
/// allowed to be a wide string, and is not subject to character translation.
|
|
|
|
///
|
|
|
|
/// [GNU] asm-string-literal:
|
|
|
|
/// string-literal
|
|
|
|
///
|
2008-12-10 08:02:53 +08:00
|
|
|
Parser::OwningExprResult Parser::ParseAsmStringLiteral() {
|
2006-10-06 13:22:26 +08:00
|
|
|
if (!isTokenStringLiteral()) {
|
2006-08-15 14:03:28 +08:00
|
|
|
Diag(Tok, diag::err_expected_string_literal);
|
2008-12-12 03:30:53 +08:00
|
|
|
return ExprError();
|
2006-08-15 14:03:28 +08:00
|
|
|
}
|
2008-06-20 03:28:49 +08:00
|
|
|
|
2008-12-12 06:51:44 +08:00
|
|
|
OwningExprResult Res(ParseStringLiteralExpression());
|
2008-12-10 08:02:53 +08:00
|
|
|
if (Res.isInvalid()) return move(Res);
|
2008-06-20 03:28:49 +08:00
|
|
|
|
2006-08-15 14:03:28 +08:00
|
|
|
// TODO: Diagnose: wide string literal in 'asm'
|
2008-06-20 03:28:49 +08:00
|
|
|
|
2008-12-10 08:02:53 +08:00
|
|
|
return move(Res);
|
2006-08-15 14:03:28 +08:00
|
|
|
}
|
|
|
|
|
2006-08-15 11:41:14 +08:00
|
|
|
/// ParseSimpleAsm
|
|
|
|
///
|
|
|
|
/// [GNU] simple-asm-expr:
|
|
|
|
/// 'asm' '(' asm-string-literal ')'
|
|
|
|
///
|
2009-02-10 02:23:29 +08:00
|
|
|
Parser::OwningExprResult Parser::ParseSimpleAsm(SourceLocation *EndLoc) {
|
2007-10-10 01:23:58 +08:00
|
|
|
assert(Tok.is(tok::kw_asm) && "Not an asm!");
|
2008-02-08 08:33:21 +08:00
|
|
|
SourceLocation Loc = ConsumeToken();
|
2008-06-20 03:28:49 +08:00
|
|
|
|
2007-10-10 01:23:58 +08:00
|
|
|
if (Tok.isNot(tok::l_paren)) {
|
2008-11-18 15:48:38 +08:00
|
|
|
Diag(Tok, diag::err_expected_lparen_after) << "asm";
|
2008-12-12 03:30:53 +08:00
|
|
|
return ExprError();
|
2006-08-15 11:41:14 +08:00
|
|
|
}
|
2008-06-20 03:28:49 +08:00
|
|
|
|
2009-02-10 02:23:29 +08:00
|
|
|
Loc = ConsumeParen();
|
2008-06-20 03:28:49 +08:00
|
|
|
|
2008-12-10 08:02:53 +08:00
|
|
|
OwningExprResult Result(ParseAsmStringLiteral());
|
2008-06-20 03:28:49 +08:00
|
|
|
|
2009-02-10 02:23:29 +08:00
|
|
|
if (Result.isInvalid()) {
|
|
|
|
SkipUntil(tok::r_paren, true, true);
|
|
|
|
if (EndLoc)
|
|
|
|
*EndLoc = Tok.getLocation();
|
|
|
|
ConsumeAnyToken();
|
|
|
|
} else {
|
|
|
|
Loc = MatchRHSPunctuation(tok::r_paren, Loc);
|
|
|
|
if (EndLoc)
|
|
|
|
*EndLoc = Loc;
|
|
|
|
}
|
2008-06-20 03:28:49 +08:00
|
|
|
|
2008-12-10 08:02:53 +08:00
|
|
|
return move(Result);
|
2006-08-15 11:41:14 +08:00
|
|
|
}
|
2006-10-28 07:18:49 +08:00
|
|
|
|
2008-11-09 00:45:02 +08:00
|
|
|
/// TryAnnotateTypeOrScopeToken - If the current token position is on a
/// typename (possibly qualified in C++) or a C++ scope specifier not followed
/// by a typename, TryAnnotateTypeOrScopeToken will replace one or more tokens
/// with a single annotation token representing the typename or C++ scope
/// respectively.
/// This simplifies handling of C++ scope specifiers and allows efficient
/// backtracking without the need to re-parse and resolve nested-names and
/// typenames.
/// It will mainly be called when we expect to treat identifiers as typenames
/// (if they are typenames). For example, in C we do not expect identifiers
/// inside expressions to be treated as typenames so it will not be called
/// for expressions in C.
/// The benefit for C/ObjC is that a typename will be annotated and
/// Actions.getTypeName will not be needed to be called again (e.g. getTypeName
/// will not be called twice, once to check whether we have a declaration
/// specifier, and another one to get the actual type inside
/// ParseDeclarationSpecifiers).
///
/// This returns true if the token was annotated.
///
/// Note that this routine emits an error if you call it with ::new or ::delete
/// as the current tokens, so only call it in contexts where these are invalid.
bool Parser::TryAnnotateTypeOrScopeToken() {
  assert((Tok.is(tok::identifier) || Tok.is(tok::coloncolon)) &&
         "Cannot be a type or scope token!");

  // FIXME: Implement template-ids
  CXXScopeSpec SS;
  if (getLang().CPlusPlus)
    ParseOptionalCXXScopeSpecifier(SS);

  if (Tok.is(tok::identifier)) {
    // Determine whether the identifier is a type name.
    if (TypeTy *Ty = Actions.getTypeName(*Tok.getIdentifierInfo(),
                                         Tok.getLocation(), CurScope, &SS)) {
      // This is a typename. Replace the current token in-place with an
      // annotation type token.
      Tok.setKind(tok::annot_typename);
      Tok.setAnnotationValue(Ty);
      Tok.setAnnotationEndLoc(Tok.getLocation());
      // Stretch the annotation's start back over the scope specifier so the
      // single token covers the whole qualified name.
      if (SS.isNotEmpty()) // it was a C++ qualified type name.
        Tok.setLocation(SS.getBeginLoc());

      // In case the tokens were cached, have Preprocessor replace
      // them with the annotation token.
      PP.AnnotateCachedTokens(Tok);
      return true;
    }

    if (!getLang().CPlusPlus) {
      // If we're in C, we can't have :: tokens at all (the lexer won't return
      // them).  If the identifier is not a type, then it can't be scope either,
      // just early exit.
      return false;
    }

    // If this is a template-id, annotate with a template-id or type token.
    if (NextToken().is(tok::less)) {
      DeclTy *Template;
      if (TemplateNameKind TNK
            = Actions.isTemplateName(*Tok.getIdentifierInfo(),
                                     CurScope, Template, &SS))
        AnnotateTemplateIdToken(Template, TNK, &SS);
    }

    // The current token, which is either an identifier or a
    // template-id, is not part of the annotation. Fall through to
    // push that token back into the stream and complete the C++ scope
    // specifier annotation.
  }

  if (Tok.is(tok::annot_template_id)) {
    TemplateIdAnnotation *TemplateId
      = static_cast<TemplateIdAnnotation *>(Tok.getAnnotationValue());
    if (TemplateId->Kind == TNK_Class_template) {
      // A template-id that refers to a type was parsed into a
      // template-id annotation in a context where we weren't allowed
      // to produce a type annotation token. Update the template-id
      // annotation token to a type annotation token now.
      // NOTE(review): the result is negated here, so the helper appears to
      // return true on failure — confirm against AnnotateTemplateIdTokenAsType.
      return !AnnotateTemplateIdTokenAsType(&SS);
    }
  }

  if (SS.isEmpty())
    return false;

  // A C++ scope specifier that isn't followed by a typename.
  // Push the current token back into the token stream (or revert it if it is
  // cached) and use an annotation scope token for current token.
  if (PP.isBacktrackEnabled())
    PP.RevertCachedTokens(1);
  else
    PP.EnterToken(Tok);
  Tok.setKind(tok::annot_cxxscope);
  Tok.setAnnotationValue(SS.getScopeRep());
  Tok.setAnnotationRange(SS.getRange());

  // In case the tokens were cached, have Preprocessor replace them with the
  // annotation token.
  PP.AnnotateCachedTokens(Tok);
  return true;
}
|
|
|
|
|
|
|
|
/// TryAnnotateScopeToken - Like TryAnnotateTypeOrScopeToken but only
|
Implement parsing of nested-name-specifiers that involve template-ids, e.g.,
std::vector<int>::allocator_type
When we parse a template-id that names a type, it will become either a
template-id annotation (which is a parsed representation of a
template-id that has not yet been through semantic analysis) or a
typename annotation (where semantic analysis has resolved the
template-id to an actual type), depending on the context. We only
produce a type in contexts where we know that we only need type
information, e.g., in a type specifier. Otherwise, we create a
template-id annotation that can later be "upgraded" by transforming it
into a typename annotation when the parser needs a type. This occurs,
for example, when we've parsed "std::vector<int>" above and then see
the '::' after it. However, it means that when writing something like
this:
template<> class Outer::Inner<int> { ... };
We have two tokens to represent Outer::Inner<int>: one token for the
nested name specifier Outer::, and one template-id annotation token
for Inner<int>, which will be passed to semantic analysis to define
the class template specialization.
Most of the churn in the template tests in this patch come from an
improvement in our error recovery from ill-formed template-ids.
llvm-svn: 65467
2009-02-26 03:37:18 +08:00
|
|
|
/// annotates C++ scope specifiers and template-ids. This returns
|
|
|
|
/// true if the token was annotated.
|
2009-01-05 08:13:00 +08:00
|
|
|
///
|
|
|
|
/// Note that this routine emits an error if you call it with ::new or ::delete
|
|
|
|
/// as the current tokens, so only call it in contexts where these are invalid.
|
2009-01-05 08:07:25 +08:00
|
|
|
bool Parser::TryAnnotateCXXScopeToken() {
|
2008-11-27 05:41:52 +08:00
|
|
|
assert(getLang().CPlusPlus &&
|
2009-01-05 06:32:19 +08:00
|
|
|
"Call sites of this function should be guarded by checking for C++");
|
2009-01-05 09:24:05 +08:00
|
|
|
assert((Tok.is(tok::identifier) || Tok.is(tok::coloncolon)) &&
|
|
|
|
"Cannot be a type or scope token!");
|
2008-11-09 00:45:02 +08:00
|
|
|
|
2008-11-27 05:41:52 +08:00
|
|
|
CXXScopeSpec SS;
|
2009-01-06 14:59:53 +08:00
|
|
|
if (!ParseOptionalCXXScopeSpecifier(SS))
|
Implement parsing of nested-name-specifiers that involve template-ids, e.g.,
std::vector<int>::allocator_type
When we parse a template-id that names a type, it will become either a
template-id annotation (which is a parsed representation of a
template-id that has not yet been through semantic analysis) or a
typename annotation (where semantic analysis has resolved the
template-id to an actual type), depending on the context. We only
produce a type in contexts where we know that we only need type
information, e.g., in a type specifier. Otherwise, we create a
template-id annotation that can later be "upgraded" by transforming it
into a typename annotation when the parser needs a type. This occurs,
for example, when we've parsed "std::vector<int>" above and then see
the '::' after it. However, it means that when writing something like
this:
template<> class Outer::Inner<int> { ... };
We have two tokens to represent Outer::Inner<int>: one token for the
nested name specifier Outer::, and one template-id annotation token
for Inner<int>, which will be passed to semantic analysis to define
the class template specialization.
Most of the churn in the template tests in this patch come from an
improvement in our error recovery from ill-formed template-ids.
llvm-svn: 65467
2009-02-26 03:37:18 +08:00
|
|
|
return Tok.is(tok::annot_template_id);
|
2008-11-09 00:45:02 +08:00
|
|
|
|
2009-01-05 06:32:19 +08:00
|
|
|
// Push the current token back into the token stream (or revert it if it is
|
|
|
|
// cached) and use an annotation scope token for current token.
|
|
|
|
if (PP.isBacktrackEnabled())
|
|
|
|
PP.RevertCachedTokens(1);
|
|
|
|
else
|
|
|
|
PP.EnterToken(Tok);
|
|
|
|
Tok.setKind(tok::annot_cxxscope);
|
|
|
|
Tok.setAnnotationValue(SS.getScopeRep());
|
|
|
|
Tok.setAnnotationRange(SS.getRange());
|
|
|
|
|
|
|
|
// In case the tokens were cached, have Preprocessor replace them with the
|
|
|
|
// annotation token.
|
|
|
|
PP.AnnotateCachedTokens(Tok);
|
2009-01-05 08:07:25 +08:00
|
|
|
return true;
|
2008-11-09 00:45:02 +08:00
|
|
|
}
|