2018-06-23 01:39:19 +08:00
|
|
|
//===- Lexer.cpp - MLIR Lexer Implementation ------------------------------===//
|
|
|
|
//
|
|
|
|
// Copyright 2019 The MLIR Authors.
|
|
|
|
//
|
|
|
|
// Licensed under the Apache License, Version 2.0 (the "License");
|
|
|
|
// you may not use this file except in compliance with the License.
|
|
|
|
// You may obtain a copy of the License at
|
|
|
|
//
|
|
|
|
// http://www.apache.org/licenses/LICENSE-2.0
|
|
|
|
//
|
|
|
|
// Unless required by applicable law or agreed to in writing, software
|
|
|
|
// distributed under the License is distributed on an "AS IS" BASIS,
|
|
|
|
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
|
|
|
// See the License for the specific language governing permissions and
|
|
|
|
// limitations under the License.
|
|
|
|
// =============================================================================
|
|
|
|
//
|
|
|
|
// This file implements the lexer for the MLIR textual form.
|
|
|
|
//
|
|
|
|
//===----------------------------------------------------------------------===//
|
|
|
|
|
|
|
|
#include "Lexer.h"
|
|
|
|
#include "llvm/Support/SourceMgr.h"
|
|
|
|
using namespace mlir;
|
|
|
|
using llvm::SMLoc;
|
|
|
|
using llvm::SourceMgr;
|
|
|
|
|
2018-06-28 02:03:08 +08:00
|
|
|
// Returns true if 'c' is an allowable punctuation character: [$._-]
// Returns false otherwise.
static bool isPunct(char c) {
  switch (c) {
  case '$':
  case '.':
  case '_':
  case '-':
    return true;
  default:
    return false;
  }
}
|
|
|
|
|
2018-07-12 04:26:23 +08:00
|
|
|
/// Construct a lexer over the main buffer of 'sourceMgr', reporting
/// diagnostics through 'errorReporter'.
Lexer::Lexer(llvm::SourceMgr &sourceMgr, SMDiagnosticHandlerTy errorReporter)
    : sourceMgr(sourceMgr), errorReporter(errorReporter) {
  // Lexing always starts at the beginning of the source manager's main buffer.
  auto *memBuffer = sourceMgr.getMemoryBuffer(sourceMgr.getMainFileID());
  curBuffer = memBuffer->getBuffer();
  curPtr = curBuffer.begin();
}
|
|
|
|
|
|
|
|
/// emitError - Emit an error message and return an Token::error token.
Token Lexer::emitError(const char *loc, const Twine &message) {
  // Build the diagnostic at the offending location and hand it to the
  // installed reporter, then produce an error token so the parser can recover.
  auto diagnostic = sourceMgr.GetMessage(SMLoc::getFromPointer(loc),
                                         SourceMgr::DK_Error, message);
  errorReporter(diagnostic);
  return formToken(Token::error, loc);
}
|
|
|
|
|
|
|
|
/// Lex the next token from the buffer and return it.  Whitespace and '//'
/// comments are skipped; at the end of the buffer a Token::eof is returned.
Token Lexer::lexToken() {
  const char *tokStart = curPtr;
  switch (*curPtr++) {
  default:
    // Handle bare identifiers.
    // NOTE(review): isalpha on a negative char (a byte >= 0x80) is undefined
    // behavior per the C standard -- consider casting to unsigned char.
    if (isalpha(curPtr[-1]))
      return lexBareIdentifierOrKeyword(tokStart);

    // Unknown character, emit an error.
    return emitError(tokStart, "unexpected character");

  case 0:
    // This may either be a nul character in the source file or may be the EOF
    // marker that llvm::MemoryBuffer guarantees will be there.
    if (curPtr-1 == curBuffer.end())
      return formToken(Token::eof, tokStart);

    LLVM_FALLTHROUGH;
  case ' ':
  case '\t':
  case '\n':
  case '\r':
    // Ignore whitespace by lexing the token that follows it.
    return lexToken();

  // Single-character punctuation tokens.
  case ':': return formToken(Token::colon, tokStart);
  case ',': return formToken(Token::comma, tokStart);
  case '(': return formToken(Token::l_paren, tokStart);
  case ')': return formToken(Token::r_paren, tokStart);
  case '{': return formToken(Token::l_brace, tokStart);
  case '}': return formToken(Token::r_brace, tokStart);
  case '[': return formToken(Token::l_bracket, tokStart);
  case ']': return formToken(Token::r_bracket, tokStart);
  case '<': return formToken(Token::less, tokStart);
  case '>': return formToken(Token::greater, tokStart);
  case '=': return formToken(Token::equal, tokStart);

  case '+': return formToken(Token::plus, tokStart);
  case '*': return formToken(Token::star, tokStart);
  case '-':
    // '->' lexes as a single arrow token; a lone '-' is a minus.
    if (*curPtr == '>') {
      ++curPtr;
      return formToken(Token::arrow, tokStart);
    }
    return formToken(Token::minus, tokStart);

  case '?':
    // '??' lexes as a single token; a lone '?' is a question mark.
    if (*curPtr == '?') {
      ++curPtr;
      return formToken(Token::questionquestion, tokStart);
    }

    return formToken(Token::question, tokStart);

  case '/':
    // Only '//' line comments are supported; a bare '/' is an error.
    if (*curPtr == '/')
      return lexComment();
    return emitError(tokStart, "unexpected character");

  case '@': return lexAtIdentifier(tokStart);

  case '#':
    LLVM_FALLTHROUGH;
  case '%':
    // '#foo' (affine map) and '%foo' (SSA value) share the suffix-id grammar.
    return lexPrefixedIdentifier(tokStart);

  case '"': return lexString(tokStart);

  case '0': case '1': case '2': case '3': case '4':
  case '5': case '6': case '7': case '8': case '9':
    return lexNumber(tokStart);
  }
}
|
|
|
|
|
|
|
|
/// Lex a '//' line comment and return the token that follows it.
/// (Note: the old doc said "starting with a semicolon", which was stale --
/// comments in this grammar begin with '//'.)
///
/// TODO: add a regex for comments here and to the spec.
///
Token Lexer::lexComment() {
  // Advance over the second '/' in a '//' comment.
  assert(*curPtr == '/');
  ++curPtr;

  while (true) {
    switch (*curPtr++) {
    case '\n':
    case '\r':
      // Newline is end of comment.
      return lexToken();
    case 0:
      // If this is the end of the buffer, end the comment.
      if (curPtr-1 == curBuffer.end()) {
        // Back up over the nul so lexToken re-reads it and forms the eof
        // token at the correct position.
        --curPtr;
        return lexToken();
      }
      LLVM_FALLTHROUGH;
    default:
      // Skip over other characters (including embedded nuls).
      break;
    }
  }
}
|
|
|
|
|
|
|
|
/// Lex a bare identifier or keyword that starts with a letter.
///
///   bare-id ::= letter (letter|digit|[_])*
///   integer-type ::= `i[1-9][0-9]*`
///
Token Lexer::lexBareIdentifierOrKeyword(const char *tokStart) {
  // Match the rest of the identifier regex: [0-9a-zA-Z_]*
  // Cast to unsigned char before the ctype calls: passing a plain char with a
  // negative value (e.g. a UTF-8 byte >= 0x80) to isalpha/isdigit is
  // undefined behavior per the C standard.
  while (isalpha(static_cast<unsigned char>(*curPtr)) ||
         isdigit(static_cast<unsigned char>(*curPtr)) || *curPtr == '_')
    ++curPtr;

  // Check to see if this identifier is a keyword.
  StringRef spelling(tokStart, curPtr-tokStart);

  // Check for i123-style integer types: 'i' followed only by digits (and at
  // least one digit).
  if (tokStart[0] == 'i') {
    bool allDigits = true;
    for (auto c : spelling.drop_front())
      allDigits &= isdigit(static_cast<unsigned char>(c)) != 0;
    if (allDigits && spelling.size() != 1)
      return Token(Token::inttype, spelling);
  }

  // Match against the keyword table generated from TokenKinds.def; anything
  // not listed there is a plain bare identifier.
  Token::Kind kind = llvm::StringSwitch<Token::Kind>(spelling)
#define TOK_KEYWORD(SPELLING) \
      .Case(#SPELLING, Token::kw_##SPELLING)
#include "TokenKinds.def"
      .Default(Token::bare_identifier);

  return Token(kind, spelling);
}
|
|
|
|
|
|
|
|
/// Lex an '@foo' identifier.
|
|
|
|
///
|
|
|
|
/// function-id ::= `@` bare-id
|
|
|
|
///
|
|
|
|
Token Lexer::lexAtIdentifier(const char *tokStart) {
|
|
|
|
// These always start with a letter.
|
|
|
|
if (!isalpha(*curPtr++))
|
|
|
|
return emitError(curPtr-1, "expected letter in @ identifier");
|
|
|
|
|
2018-06-25 02:18:29 +08:00
|
|
|
while (isalpha(*curPtr) || isdigit(*curPtr) || *curPtr == '_')
|
2018-06-23 01:39:19 +08:00
|
|
|
++curPtr;
|
|
|
|
return formToken(Token::at_identifier, tokStart);
|
|
|
|
}
|
2018-06-23 06:52:02 +08:00
|
|
|
|
2018-07-08 06:48:26 +08:00
|
|
|
/// Lex an identifier that starts with a prefix followed by suffix-id.
|
2018-06-28 02:03:08 +08:00
|
|
|
///
|
|
|
|
/// affine-map-id ::= `#` suffix-id
|
2018-07-08 06:48:26 +08:00
|
|
|
/// ssa-id ::= '%' suffix-id
|
2018-06-28 02:03:08 +08:00
|
|
|
/// suffix-id ::= digit+ | (letter|id-punct) (letter|id-punct|digit)*
|
|
|
|
///
|
2018-07-08 06:48:26 +08:00
|
|
|
Token Lexer::lexPrefixedIdentifier(const char *tokStart) {
|
|
|
|
Token::Kind kind;
|
|
|
|
StringRef errorKind;
|
|
|
|
switch (*tokStart) {
|
|
|
|
case '#':
|
|
|
|
kind = Token::hash_identifier;
|
|
|
|
errorKind = "invalid affine map name";
|
|
|
|
break;
|
|
|
|
case '%':
|
|
|
|
kind = Token::percent_identifier;
|
|
|
|
errorKind = "invalid SSA name";
|
|
|
|
break;
|
|
|
|
default:
|
|
|
|
llvm_unreachable("invalid caller");
|
|
|
|
}
|
|
|
|
|
2018-06-28 02:03:08 +08:00
|
|
|
// Parse suffix-id.
|
|
|
|
if (isdigit(*curPtr)) {
|
|
|
|
// If suffix-id starts with a digit, the rest must be digits.
|
|
|
|
while (isdigit(*curPtr)) {
|
|
|
|
++curPtr;
|
|
|
|
}
|
|
|
|
} else if (isalpha(*curPtr) || isPunct(*curPtr)) {
|
|
|
|
do {
|
|
|
|
++curPtr;
|
|
|
|
} while (isalpha(*curPtr) || isdigit(*curPtr) || isPunct(*curPtr));
|
|
|
|
} else {
|
2018-07-08 06:48:26 +08:00
|
|
|
return emitError(curPtr - 1, errorKind);
|
2018-06-28 02:03:08 +08:00
|
|
|
}
|
2018-07-08 06:48:26 +08:00
|
|
|
|
|
|
|
return formToken(kind, tokStart);
|
2018-06-28 02:03:08 +08:00
|
|
|
}
|
|
|
|
|
2018-06-23 06:52:02 +08:00
|
|
|
/// Lex an integer literal.
|
|
|
|
///
|
|
|
|
/// integer-literal ::= digit+ | `0x` hex_digit+
|
|
|
|
///
|
|
|
|
Token Lexer::lexNumber(const char *tokStart) {
|
|
|
|
assert(isdigit(curPtr[-1]));
|
|
|
|
|
|
|
|
// Handle the hexadecimal case.
|
|
|
|
if (curPtr[-1] == '0' && *curPtr == 'x') {
|
|
|
|
++curPtr;
|
|
|
|
|
|
|
|
if (!isxdigit(*curPtr))
|
|
|
|
return emitError(curPtr, "expected hexadecimal digit");
|
|
|
|
|
|
|
|
while (isxdigit(*curPtr))
|
|
|
|
++curPtr;
|
|
|
|
|
|
|
|
return formToken(Token::integer, tokStart);
|
|
|
|
}
|
|
|
|
|
|
|
|
// Handle the normal decimal case.
|
|
|
|
while (isdigit(*curPtr))
|
|
|
|
++curPtr;
|
|
|
|
|
|
|
|
return formToken(Token::integer, tokStart);
|
|
|
|
}
|
2018-06-29 11:45:33 +08:00
|
|
|
|
|
|
|
/// Lex a string literal.
|
|
|
|
///
|
|
|
|
/// string-literal ::= '"' [^"\n\f\v\r]* '"'
|
|
|
|
///
|
|
|
|
/// TODO: define escaping rules.
|
|
|
|
Token Lexer::lexString(const char *tokStart) {
|
|
|
|
assert(curPtr[-1] == '"');
|
|
|
|
|
|
|
|
while (1) {
|
|
|
|
switch (*curPtr++) {
|
|
|
|
case '"':
|
|
|
|
return formToken(Token::string, tokStart);
|
|
|
|
case '0':
|
|
|
|
// If this is a random nul character in the middle of a string, just
|
|
|
|
// include it. If it is the end of file, then it is an error.
|
|
|
|
if (curPtr-1 != curBuffer.end())
|
|
|
|
continue;
|
|
|
|
LLVM_FALLTHROUGH;
|
|
|
|
case '\n':
|
|
|
|
case '\v':
|
|
|
|
case '\f':
|
|
|
|
return emitError(curPtr-1, "expected '\"' in string literal");
|
|
|
|
|
|
|
|
default:
|
|
|
|
continue;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
}
|
2018-06-30 09:09:29 +08:00
|
|
|
|