| Index: pkg/analyzer/lib/src/generated/scanner.dart
|
| diff --git a/pkg/analyzer/lib/src/generated/scanner.dart b/pkg/analyzer/lib/src/generated/scanner.dart
|
| index a12c4eacee318f94aa19b927ca95979a19f82773..9eca58cdf947206a93c3d6f0c5ddf4668650603c 100644
|
| --- a/pkg/analyzer/lib/src/generated/scanner.dart
|
| +++ b/pkg/analyzer/lib/src/generated/scanner.dart
|
| @@ -7,14 +7,14 @@
|
|
|
| library engine.scanner;
|
|
|
| -import 'dart:collection';
|
| import "dart:math" as math;
|
| +import 'dart:collection';
|
|
|
| +import 'error.dart';
|
| +import 'instrumentation.dart';
|
| import 'java_core.dart';
|
| import 'java_engine.dart';
|
| import 'source.dart';
|
| -import 'error.dart';
|
| -import 'instrumentation.dart';
|
| import 'utilities_collection.dart' show TokenMap;
|
|
|
| /**
|
| @@ -34,7 +34,10 @@ class BeginToken extends Token {
|
| * @param offset the offset from the beginning of the file to the first character in the token
|
| */
|
| BeginToken(TokenType type, int offset) : super(type, offset) {
|
| - assert((type == TokenType.OPEN_CURLY_BRACKET || type == TokenType.OPEN_PAREN || type == TokenType.OPEN_SQUARE_BRACKET || type == TokenType.STRING_INTERPOLATION_EXPRESSION));
|
| + assert((type == TokenType.OPEN_CURLY_BRACKET ||
|
| + type == TokenType.OPEN_PAREN ||
|
| + type == TokenType.OPEN_SQUARE_BRACKET ||
|
| + type == TokenType.STRING_INTERPOLATION_EXPRESSION));
|
| }
|
|
|
| @override
|
| @@ -59,10 +62,8 @@ class BeginTokenWithComment extends BeginToken {
|
| * @param offset the offset from the beginning of the file to the first character in the token
|
| * @param precedingComment the first comment in the list of comments that precede this token
|
| */
|
| - BeginTokenWithComment(TokenType type, int offset, this._precedingComment) : super(type, offset);
|
| -
|
| - @override
|
| - Token copy() => new BeginTokenWithComment(type, offset, copyComments(_precedingComment));
|
| + BeginTokenWithComment(TokenType type, int offset, this._precedingComment)
|
| + : super(type, offset);
|
|
|
| @override
|
| Token get precedingComments => _precedingComment;
|
| @@ -76,6 +77,58 @@ class BeginTokenWithComment extends BeginToken {
|
| token = token.next;
|
| }
|
| }
|
| +
|
| + @override
|
| + Token copy() =>
|
| + new BeginTokenWithComment(type, offset, copyComments(_precedingComment));
|
| +}
|
| +
|
| +/**
|
| + * The interface `CharacterReader` defines the behavior of objects that read a sequence of characters from a source.
|
| + */
|
| +abstract class CharacterReader {
|
| + /**
|
| + * Return the current offset relative to the beginning of the source. Return the initial offset if
|
| + * the scanner has not yet scanned the source code, and one (1) past the end of the source code if
|
| + * the entire source code has been scanned.
|
| + *
|
| + * @return the current offset of the scanner in the source
|
| + */
|
| + int get offset;
|
| +
|
| + /**
|
| + * Set the current offset relative to the beginning of the source. The new offset must be between
|
| + * the initial offset and one (1) past the end of the source code.
|
| + *
|
| + * @param offset the new offset in the source
|
| + */
|
| + void set offset(int offset);
|
| +
|
| + /**
|
| + * Advance the current position and return the character at the new current position.
|
| + *
|
| + * @return the character at the new current position
|
| + */
|
| + int advance();
|
| +
|
| + /**
|
| + * Return the substring of the source code between the start offset and the modified current
|
| + * position. The current position is modified by adding the end delta.
|
| + *
|
| + * @param start the offset to the beginning of the string, relative to the start of the file
|
| + * @param endDelta the number of characters after the current location to be included in the
|
| + * string, or the number of characters before the current location to be excluded if the
|
| + * offset is negative
|
| + * @return the specified substring of the source code
|
| + */
|
| + String getString(int start, int endDelta);
|
| +
|
| + /**
|
| + * Return the character at the current position without changing the current position.
|
| + *
|
| + * @return the character at the current position
|
| + */
|
| + int peek();
|
| }
|
|
|
| /**
|
| @@ -109,6 +162,14 @@ class CharSequenceReader implements CharacterReader {
|
| }
|
|
|
| @override
|
| + int get offset => _charOffset;
|
| +
|
| + @override
|
| + void set offset(int offset) {
|
| + _charOffset = offset;
|
| + }
|
| +
|
| + @override
|
| int advance() {
|
| if (_charOffset + 1 >= _stringLength) {
|
| return -1;
|
| @@ -117,10 +178,8 @@ class CharSequenceReader implements CharacterReader {
|
| }
|
|
|
| @override
|
| - int get offset => _charOffset;
|
| -
|
| - @override
|
| - String getString(int start, int endDelta) => _sequence.substring(start, _charOffset + 1 + endDelta).toString();
|
| + String getString(int start, int endDelta) =>
|
| + _sequence.substring(start, _charOffset + 1 + endDelta).toString();
|
|
|
| @override
|
| int peek() {
|
| @@ -129,59 +188,6 @@ class CharSequenceReader implements CharacterReader {
|
| }
|
| return _sequence.codeUnitAt(_charOffset + 1);
|
| }
|
| -
|
| - @override
|
| - void set offset(int offset) {
|
| - _charOffset = offset;
|
| - }
|
| -}
|
| -
|
| -/**
|
| - * The interface `CharacterReader`
|
| - */
|
| -abstract class CharacterReader {
|
| - /**
|
| - * Advance the current position and return the character at the new current position.
|
| - *
|
| - * @return the character at the new current position
|
| - */
|
| - int advance();
|
| -
|
| - /**
|
| - * Return the current offset relative to the beginning of the source. Return the initial offset if
|
| - * the scanner has not yet scanned the source code, and one (1) past the end of the source code if
|
| - * the entire source code has been scanned.
|
| - *
|
| - * @return the current offset of the scanner in the source
|
| - */
|
| - int get offset;
|
| -
|
| - /**
|
| - * Return the substring of the source code between the start offset and the modified current
|
| - * position. The current position is modified by adding the end delta.
|
| - *
|
| - * @param start the offset to the beginning of the string, relative to the start of the file
|
| - * @param endDelta the number of characters after the current location to be included in the
|
| - * string, or the number of characters before the current location to be excluded if the
|
| - * offset is negative
|
| - * @return the specified substring of the source code
|
| - */
|
| - String getString(int start, int endDelta);
|
| -
|
| - /**
|
| - * Return the character at the current position without changing the current position.
|
| - *
|
| - * @return the character at the current position
|
| - */
|
| - int peek();
|
| -
|
| - /**
|
| - * Set the current offset relative to the beginning of the source. The new offset must be between
|
| - * the initial offset and one (1) past the end of the source code.
|
| - *
|
| - * @param offset the new offset in the source
|
| - */
|
| - void set offset(int offset);
|
| }
|
|
|
| /**
|
| @@ -225,11 +231,21 @@ class IncrementalScanner extends Scanner {
|
| * @param reader the character reader used to read the characters in the source
|
| * @param errorListener the error listener that will be informed of any errors that are found
|
| */
|
| - IncrementalScanner(Source source, CharacterReader reader, AnalysisErrorListener errorListener) : super(source, reader, errorListener) {
|
| + IncrementalScanner(Source source, CharacterReader reader,
|
| + AnalysisErrorListener errorListener)
|
| + : super(source, reader, errorListener) {
|
| this._reader = reader;
|
| }
|
|
|
| /**
|
| + * Return `true` if there were any tokens either added or removed (or both) as a result of
|
| + * the modification.
|
| + *
|
| + * @return `true` if there were any tokens changed as a result of the modification
|
| + */
|
| + bool get hasNonWhitespaceChange => _hasNonWhitespaceChange;
|
| +
|
| + /**
|
| * Return the token in the new token stream immediately to the left of the range of tokens that
|
| * were inserted, or the token immediately to the left of the modified region if there were no new
|
| * tokens.
|
| @@ -255,14 +271,6 @@ class IncrementalScanner extends Scanner {
|
| TokenMap get tokenMap => _tokenMap;
|
|
|
| /**
|
| - * Return `true` if there were any tokens either added or removed (or both) as a result of
|
| - * the modification.
|
| - *
|
| - * @return `true` if there were any tokens changed as a result of the modification
|
| - */
|
| - bool get hasNonWhitespaceChange => _hasNonWhitespaceChange;
|
| -
|
| - /**
|
| * Given the stream of tokens scanned from the original source, the modified source (the result of
|
| * replacing one contiguous range of characters with another string of characters), and a
|
| * specification of the modification that was made, return a stream of tokens scanned from the
|
| @@ -274,11 +282,13 @@ class IncrementalScanner extends Scanner {
|
| * @param removedLength the number of characters removed from the original source
|
| * @param insertedLength the number of characters added to the modified source
|
| */
|
| - Token rescan(Token originalStream, int index, int removedLength, int insertedLength) {
|
| + Token rescan(Token originalStream, int index, int removedLength,
|
| + int insertedLength) {
|
| //
|
| - // Copy all of the tokens in the originalStream whose end is less than the replacement start.
|
| - // (If the replacement start is equal to the end of an existing token, then it means that the
|
| - // existing token might have been modified, so we need to rescan it.)
|
| + // Copy all of the tokens in the originalStream whose end is less than the
|
| + // replacement start. (If the replacement start is equal to the end of an
|
| + // existing token, then it means that the existing token might have been
|
| + // modified, so we need to rescan it.)
|
| //
|
| while (originalStream.type != TokenType.EOF && originalStream.end < index) {
|
| originalStream = _copyAndAdvance(originalStream, 0);
|
| @@ -287,18 +297,20 @@ class IncrementalScanner extends Scanner {
|
| Token oldLeftToken = originalStream.previous;
|
| _leftToken = tail;
|
| //
|
| - // Skip tokens in the original stream until we find a token whose offset is greater than the end
|
| - // of the removed region. (If the end of the removed region is equal to the beginning of an
|
| - // existing token, then it means that the existing token might have been modified, so we need to
|
| - // rescan it.)
|
| + // Skip tokens in the original stream until we find a token whose offset is
|
| + // greater than the end of the removed region. (If the end of the removed
|
| + // region is equal to the beginning of an existing token, then it means that
|
| + // the existing token might have been modified, so we need to rescan it.)
|
| //
|
| int removedEnd = index + (removedLength == 0 ? 0 : removedLength - 1);
|
| - while (originalStream.type != TokenType.EOF && originalStream.offset <= removedEnd) {
|
| + while (originalStream.type != TokenType.EOF &&
|
| + originalStream.offset <= removedEnd) {
|
| originalStream = originalStream.next;
|
| }
|
| Token oldLast;
|
| Token oldRightToken;
|
| - if (originalStream.type != TokenType.EOF && removedEnd + 1 == originalStream.offset) {
|
| + if (originalStream.type != TokenType.EOF &&
|
| + removedEnd + 1 == originalStream.offset) {
|
| oldLast = originalStream;
|
| originalStream = originalStream.next;
|
| oldRightToken = originalStream;
|
| @@ -307,22 +319,25 @@ class IncrementalScanner extends Scanner {
|
| oldRightToken = originalStream;
|
| }
|
| //
|
| - // Compute the delta between the character index of characters after the modified region in the
|
| - // original source and the index of the corresponding character in the modified source.
|
| + // Compute the delta between the character index of characters after the
|
| + // modified region in the original source and the index of the corresponding
|
| + // character in the modified source.
|
| //
|
| int delta = insertedLength - removedLength;
|
| //
|
| - // Compute the range of characters that are known to need to be rescanned. If the index is
|
| - // within an existing token, then we need to start at the beginning of the token.
|
| + // Compute the range of characters that are known to need to be rescanned.
|
| + // If the index is within an existing token, then we need to start at the
|
| + // beginning of the token.
|
| //
|
| int scanStart = math.min(oldFirst.offset, index);
|
| int oldEnd = oldLast.end + delta - 1;
|
| int newEnd = index + insertedLength - 1;
|
| int scanEnd = math.max(newEnd, oldEnd);
|
| //
|
| - // Starting at the start of the scan region, scan tokens from the modifiedSource until the end
|
| - // of the just scanned token is greater than or equal to end of the scan region in the modified
|
| - // source. Include trailing characters of any token that was split as a result of inserted text,
|
| + // Starting at the start of the scan region, scan tokens from the
|
| + // modifiedSource until the end of the just scanned token is greater than or
|
| + // equal to end of the scan region in the modified source. Include trailing
|
| + // characters of any token that was split as a result of inserted text,
|
| // as in "ab" --> "a.b".
|
| //
|
| _reader.offset = scanStart - 1;
|
| @@ -331,7 +346,8 @@ class IncrementalScanner extends Scanner {
|
| next = bigSwitch(next);
|
| }
|
| //
|
| - // Copy the remaining tokens in the original stream, but apply the delta to the token's offset.
|
| + // Copy the remaining tokens in the original stream, but apply the delta to
|
| + // the token's offset.
|
| //
|
| if (originalStream.type == TokenType.EOF) {
|
| _copyAndAdvance(originalStream, delta);
|
| @@ -347,13 +363,17 @@ class IncrementalScanner extends Scanner {
|
| eof.setNextWithoutSettingPrevious(eof);
|
| }
|
| //
|
| - // If the index is immediately after an existing token and the inserted characters did not
|
| - // change that original token, then adjust the leftToken to be the next token. For example, in
|
| - // "a; c;" --> "a;b c;", the leftToken was ";", but this code advances it to "b" since "b" is
|
| - // the first new token.
|
| + // If the index is immediately after an existing token and the inserted
|
| + // characters did not change that original token, then adjust the leftToken
|
| + // to be the next token. For example, in "a; c;" --> "a;b c;", the leftToken
|
| + // was ";", but this code advances it to "b" since "b" is the first new
|
| + // token.
|
| //
|
| Token newFirst = _leftToken.next;
|
| - while (!identical(newFirst, _rightToken) && !identical(oldFirst, oldRightToken) && newFirst.type != TokenType.EOF && _equalTokens(oldFirst, newFirst)) {
|
| + while (!identical(newFirst, _rightToken) &&
|
| + !identical(oldFirst, oldRightToken) &&
|
| + newFirst.type != TokenType.EOF &&
|
| + _equalTokens(oldFirst, newFirst)) {
|
| _tokenMap.put(oldFirst, newFirst);
|
| oldLeftToken = oldFirst;
|
| oldFirst = oldFirst.next;
|
| @@ -361,18 +381,23 @@ class IncrementalScanner extends Scanner {
|
| newFirst = newFirst.next;
|
| }
|
| Token newLast = _rightToken.previous;
|
| - while (!identical(newLast, _leftToken) && !identical(oldLast, oldLeftToken) && newLast.type != TokenType.EOF && _equalTokens(oldLast, newLast)) {
|
| + while (!identical(newLast, _leftToken) &&
|
| + !identical(oldLast, oldLeftToken) &&
|
| + newLast.type != TokenType.EOF &&
|
| + _equalTokens(oldLast, newLast)) {
|
| _tokenMap.put(oldLast, newLast);
|
| oldRightToken = oldLast;
|
| oldLast = oldLast.previous;
|
| _rightToken = newLast;
|
| newLast = newLast.previous;
|
| }
|
| - _hasNonWhitespaceChange = !identical(_leftToken.next, _rightToken) || !identical(oldLeftToken.next, oldRightToken);
|
| + _hasNonWhitespaceChange = !identical(_leftToken.next, _rightToken) ||
|
| + !identical(oldLeftToken.next, oldRightToken);
|
| //
|
| - // TODO(brianwilkerson) Begin tokens are not getting associated with the corresponding end
|
| - // tokens (because the end tokens have not been copied when we're copying the begin tokens).
|
| - // This could have implications for parsing.
|
| + // TODO(brianwilkerson) Begin tokens are not getting associated with the
|
| + // corresponding end tokens (because the end tokens have not been copied
|
| + // when we're copying the begin tokens). This could have implications for
|
| + // parsing.
|
| // TODO(brianwilkerson) Update the lineInfo.
|
| //
|
| return firstToken;
|
| @@ -401,7 +426,10 @@ class IncrementalScanner extends Scanner {
|
| * @param newToken the token from the new stream that is being compared
|
| * @return `true` if the two tokens are equal to each other
|
| */
|
| - bool _equalTokens(Token oldToken, Token newToken) => oldToken.type == newToken.type && oldToken.length == newToken.length && oldToken.lexeme == newToken.lexeme;
|
| + bool _equalTokens(Token oldToken, Token newToken) =>
|
| + oldToken.type == newToken.type &&
|
| + oldToken.length == newToken.length &&
|
| + oldToken.lexeme == newToken.lexeme;
|
| }
|
|
|
| /**
|
| @@ -474,37 +502,49 @@ class Keyword extends Enum<Keyword> {
|
|
|
| static const Keyword WITH = const Keyword.con1('WITH', 32, "with");
|
|
|
| - static const Keyword ABSTRACT = const Keyword.con2('ABSTRACT', 33, "abstract", true);
|
| + static const Keyword ABSTRACT =
|
| + const Keyword.con2('ABSTRACT', 33, "abstract", true);
|
|
|
| static const Keyword AS = const Keyword.con2('AS', 34, "as", true);
|
|
|
| - static const Keyword DEFERRED = const Keyword.con2('DEFERRED', 35, "deferred", true);
|
| + static const Keyword DEFERRED =
|
| + const Keyword.con2('DEFERRED', 35, "deferred", true);
|
|
|
| - static const Keyword DYNAMIC = const Keyword.con2('DYNAMIC', 36, "dynamic", true);
|
| + static const Keyword DYNAMIC =
|
| + const Keyword.con2('DYNAMIC', 36, "dynamic", true);
|
|
|
| - static const Keyword EXPORT = const Keyword.con2('EXPORT', 37, "export", true);
|
| + static const Keyword EXPORT =
|
| + const Keyword.con2('EXPORT', 37, "export", true);
|
|
|
| - static const Keyword EXTERNAL = const Keyword.con2('EXTERNAL', 38, "external", true);
|
| + static const Keyword EXTERNAL =
|
| + const Keyword.con2('EXTERNAL', 38, "external", true);
|
|
|
| - static const Keyword FACTORY = const Keyword.con2('FACTORY', 39, "factory", true);
|
| + static const Keyword FACTORY =
|
| + const Keyword.con2('FACTORY', 39, "factory", true);
|
|
|
| static const Keyword GET = const Keyword.con2('GET', 40, "get", true);
|
|
|
| - static const Keyword IMPLEMENTS = const Keyword.con2('IMPLEMENTS', 41, "implements", true);
|
| + static const Keyword IMPLEMENTS =
|
| + const Keyword.con2('IMPLEMENTS', 41, "implements", true);
|
|
|
| - static const Keyword IMPORT = const Keyword.con2('IMPORT', 42, "import", true);
|
| + static const Keyword IMPORT =
|
| + const Keyword.con2('IMPORT', 42, "import", true);
|
|
|
| - static const Keyword LIBRARY = const Keyword.con2('LIBRARY', 43, "library", true);
|
| + static const Keyword LIBRARY =
|
| + const Keyword.con2('LIBRARY', 43, "library", true);
|
|
|
| - static const Keyword OPERATOR = const Keyword.con2('OPERATOR', 44, "operator", true);
|
| + static const Keyword OPERATOR =
|
| + const Keyword.con2('OPERATOR', 44, "operator", true);
|
|
|
| static const Keyword PART = const Keyword.con2('PART', 45, "part", true);
|
|
|
| static const Keyword SET = const Keyword.con2('SET', 46, "set", true);
|
|
|
| - static const Keyword STATIC = const Keyword.con2('STATIC', 47, "static", true);
|
| + static const Keyword STATIC =
|
| + const Keyword.con2('STATIC', 47, "static", true);
|
|
|
| - static const Keyword TYPEDEF = const Keyword.con2('TYPEDEF', 48, "typedef", true);
|
| + static const Keyword TYPEDEF =
|
| + const Keyword.con2('TYPEDEF', 48, "typedef", true);
|
|
|
| static const List<Keyword> values = const [
|
| ASSERT,
|
| @@ -558,6 +598,11 @@ class Keyword extends Enum<Keyword> {
|
| TYPEDEF];
|
|
|
| /**
|
| + * A table mapping the lexemes of keywords to the corresponding keyword.
|
| + */
|
| + static Map<String, Keyword> keywords = _createKeywordMap();
|
| +
|
| + /**
|
| * The lexeme for the keyword.
|
| */
|
| final String syntax;
|
| @@ -569,30 +614,13 @@ class Keyword extends Enum<Keyword> {
|
| final bool isPseudoKeyword;
|
|
|
| /**
|
| - * A table mapping the lexemes of keywords to the corresponding keyword.
|
| - */
|
| - static Map<String, Keyword> keywords = _createKeywordMap();
|
| -
|
| - /**
|
| - * Create a table mapping the lexemes of keywords to the corresponding keyword.
|
| - *
|
| - * @return the table that was created
|
| - */
|
| - static Map<String, Keyword> _createKeywordMap() {
|
| - LinkedHashMap<String, Keyword> result = new LinkedHashMap<String, Keyword>();
|
| - for (Keyword keyword in values) {
|
| - result[keyword.syntax] = keyword;
|
| - }
|
| - return result;
|
| - }
|
| -
|
| - /**
|
| * Initialize a newly created keyword to have the given syntax. The keyword is not a
|
| * pseudo-keyword.
|
| *
|
| * @param syntax the lexeme for the keyword
|
| */
|
| - const Keyword.con1(String name, int ordinal, String syntax) : this.con2(name, ordinal, syntax, false);
|
| + const Keyword.con1(String name, int ordinal, String syntax)
|
| + : this.con2(name, ordinal, syntax, false);
|
|
|
| /**
|
| * Initialize a newly created keyword to have the given syntax. The keyword is a pseudo-keyword if
|
| @@ -601,7 +629,23 @@ class Keyword extends Enum<Keyword> {
|
| * @param syntax the lexeme for the keyword
|
| * @param isPseudoKeyword `true` if this keyword is a pseudo-keyword
|
| */
|
| - const Keyword.con2(String name, int ordinal, this.syntax, this.isPseudoKeyword) : super(name, ordinal);
|
| + const Keyword.con2(String name, int ordinal, this.syntax,
|
| + this.isPseudoKeyword)
|
| + : super(name, ordinal);
|
| +
|
| + /**
|
| + * Create a table mapping the lexemes of keywords to the corresponding keyword.
|
| + *
|
| + * @return the table that was created
|
| + */
|
| + static Map<String, Keyword> _createKeywordMap() {
|
| + LinkedHashMap<String, Keyword> result =
|
| + new LinkedHashMap<String, Keyword>();
|
| + for (Keyword keyword in values) {
|
| + result[keyword.syntax] = keyword;
|
| + }
|
| + return result;
|
| + }
|
| }
|
|
|
| /**
|
| @@ -620,6 +664,46 @@ class KeywordState {
|
| static KeywordState KEYWORD_STATE = _createKeywordStateTable();
|
|
|
| /**
|
| + * A table mapping characters to the states to which those characters will transition. (The index
|
| + * into the array is the offset from the character `'a'` to the transitioning character.)
|
| + */
|
| + final List<KeywordState> _table;
|
| +
|
| + /**
|
| + * The keyword that is recognized by this state, or `null` if this state is not a terminal
|
| + * state.
|
| + */
|
| + Keyword _keyword;
|
| +
|
| + /**
|
| + * Initialize a newly created state to have the given transitions and to recognize the keyword
|
| + * with the given syntax.
|
| + *
|
| + * @param table a table mapping characters to the states to which those characters will transition
|
| + * @param syntax the syntax of the keyword that is recognized by the state
|
| + */
|
| + KeywordState(this._table, String syntax) {
|
| + this._keyword = (syntax == null) ? null : Keyword.keywords[syntax];
|
| + }
|
| +
|
| + /**
|
| + * Return the keyword that was recognized by this state, or `null` if this state does not
|
| + * recognize a keyword.
|
| + *
|
| + * @return the keyword that was matched by reaching this state
|
| + */
|
| + Keyword keyword() => _keyword;
|
| +
|
| + /**
|
| + * Return the state that follows this state on a transition of the given character, or
|
| + * `null` if there is no valid state reachable from this state with such a transition.
|
| + *
|
| + * @param c the character used to transition from this state to another state
|
| + * @return the state that follows this state on a transition of the given character
|
| + */
|
| + KeywordState next(int c) => _table[c - 0x61];
|
| +
|
| + /**
|
| * Create the next state in the state machine where we have already recognized the subset of
|
| * strings in the given array of strings starting at the given offset and having the given length.
|
| * All of these strings have a common prefix and the next character is at the given start index.
|
| @@ -632,7 +716,8 @@ class KeywordState {
|
| * @param length the number of strings in the array that pass through the state being built
|
| * @return the state that was created
|
| */
|
| - static KeywordState _computeKeywordStateTable(int start, List<String> strings, int offset, int length) {
|
| + static KeywordState _computeKeywordStateTable(int start, List<String> strings,
|
| + int offset, int length) {
|
| List<KeywordState> result = new List<KeywordState>(26);
|
| assert(length != 0);
|
| int chunk = 0x0;
|
| @@ -646,7 +731,8 @@ class KeywordState {
|
| int c = strings[i].codeUnitAt(start);
|
| if (chunk != c) {
|
| if (chunkStart != -1) {
|
| - result[chunk - 0x61] = _computeKeywordStateTable(start + 1, strings, chunkStart, i - chunkStart);
|
| + result[chunk - 0x61] =
|
| + _computeKeywordStateTable(start + 1, strings, chunkStart, i - chunkStart);
|
| }
|
| chunkStart = i;
|
| chunk = c;
|
| @@ -655,7 +741,12 @@ class KeywordState {
|
| }
|
| if (chunkStart != -1) {
|
| assert(result[chunk - 0x61] == null);
|
| - result[chunk - 0x61] = _computeKeywordStateTable(start + 1, strings, chunkStart, offset + length - chunkStart);
|
| + result[chunk - 0x61] =
|
| + _computeKeywordStateTable(
|
| + start + 1,
|
| + strings,
|
| + chunkStart,
|
| + offset + length - chunkStart);
|
| } else {
|
| assert(length == 1);
|
| return new KeywordState(_EMPTY_TABLE, strings[offset]);
|
| @@ -681,46 +772,6 @@ class KeywordState {
|
| strings.sort();
|
| return _computeKeywordStateTable(0, strings, 0, strings.length);
|
| }
|
| -
|
| - /**
|
| - * A table mapping characters to the states to which those characters will transition. (The index
|
| - * into the array is the offset from the character `'a'` to the transitioning character.)
|
| - */
|
| - final List<KeywordState> _table;
|
| -
|
| - /**
|
| - * The keyword that is recognized by this state, or `null` if this state is not a terminal
|
| - * state.
|
| - */
|
| - Keyword _keyword;
|
| -
|
| - /**
|
| - * Initialize a newly created state to have the given transitions and to recognize the keyword
|
| - * with the given syntax.
|
| - *
|
| - * @param table a table mapping characters to the states to which those characters will transition
|
| - * @param syntax the syntax of the keyword that is recognized by the state
|
| - */
|
| - KeywordState(this._table, String syntax) {
|
| - this._keyword = (syntax == null) ? null : Keyword.keywords[syntax];
|
| - }
|
| -
|
| - /**
|
| - * Return the keyword that was recognized by this state, or `null` if this state does not
|
| - * recognized a keyword.
|
| - *
|
| - * @return the keyword that was matched by reaching this state
|
| - */
|
| - Keyword keyword() => _keyword;
|
| -
|
| - /**
|
| - * Return the state that follows this state on a transition of the given character, or
|
| - * `null` if there is no valid state reachable from this state with such a transition.
|
| - *
|
| - * @param c the character used to transition from this state to another state
|
| - * @return the state that follows this state on a transition of the given character
|
| - */
|
| - KeywordState next(int c) => _table[c - 0x61];
|
| }
|
|
|
| /**
|
| @@ -741,10 +792,10 @@ class KeywordToken extends Token {
|
| KeywordToken(this.keyword, int offset) : super(TokenType.KEYWORD, offset);
|
|
|
| @override
|
| - Token copy() => new KeywordToken(keyword, offset);
|
| + String get lexeme => keyword.syntax;
|
|
|
| @override
|
| - String get lexeme => keyword.syntax;
|
| + Token copy() => new KeywordToken(keyword, offset);
|
|
|
| @override
|
| Keyword value() => keyword;
|
| @@ -768,10 +819,8 @@ class KeywordTokenWithComment extends KeywordToken {
|
| * @param offset the offset from the beginning of the file to the first character in the token
|
| * @param precedingComment the first comment in the list of comments that precede this token
|
| */
|
| - KeywordTokenWithComment(Keyword keyword, int offset, this._precedingComment) : super(keyword, offset);
|
| -
|
| - @override
|
| - Token copy() => new KeywordTokenWithComment(keyword, offset, copyComments(_precedingComment));
|
| + KeywordTokenWithComment(Keyword keyword, int offset, this._precedingComment)
|
| + : super(keyword, offset);
|
|
|
| @override
|
| Token get precedingComments => _precedingComment;
|
| @@ -785,6 +834,10 @@ class KeywordTokenWithComment extends KeywordToken {
|
| token = token.next;
|
| }
|
| }
|
| +
|
| + @override
|
| + Token copy() =>
|
| + new KeywordTokenWithComment(keyword, offset, copyComments(_precedingComment));
|
| }
|
|
|
| /**
|
| @@ -879,11 +932,11 @@ class Scanner {
|
| }
|
|
|
| /**
|
| - * Return an array containing the offsets of the first character of each line in the source code.
|
| + * Return the first token in the token stream that was scanned.
|
| *
|
| - * @return an array containing the offsets of the first character of each line in the source code
|
| + * @return the first token in the token stream that was scanned
|
| */
|
| - List<int> get lineStarts => _lineStarts;
|
| + Token get firstToken => _tokens.next;
|
|
|
| /**
|
| * Return `true` if any unmatched groups were found during the parse.
|
| @@ -893,58 +946,27 @@ class Scanner {
|
| bool get hasUnmatchedGroups => _hasUnmatchedGroups;
|
|
|
| /**
|
| - * Set whether documentation tokens should be scanned.
|
| + * Return an array containing the offsets of the first character of each line in the source code.
|
| *
|
| - * @param preserveComments `true` if documentation tokens should be scanned
|
| + * @return an array containing the offsets of the first character of each line in the source code
|
| */
|
| - void set preserveComments(bool preserveComments) {
|
| - this._preserveComments = preserveComments;
|
| - }
|
| + List<int> get lineStarts => _lineStarts;
|
|
|
| /**
|
| - * Record that the source begins on the given line and column at the current offset as given by
|
| - * the reader. The line starts for lines before the given line will not be correct.
|
| - *
|
| - * This method must be invoked at most one time and must be invoked before scanning begins. The
|
| - * values provided must be sensible. The results are undefined if these conditions are violated.
|
| + * Set whether documentation tokens should be scanned.
|
| *
|
| - * @param line the one-based index of the line containing the first character of the source
|
| - * @param column the one-based index of the column in which the first character of the source
|
| - * occurs
|
| + * @param preserveComments `true` if documentation tokens should be scanned
|
| */
|
| - void setSourceStart(int line, int column) {
|
| - int offset = _reader.offset;
|
| - if (line < 1 || column < 1 || offset < 0 || (line + column - 2) >= offset) {
|
| - return;
|
| - }
|
| - for (int i = 2; i < line; i++) {
|
| - _lineStarts.add(1);
|
| - }
|
| - _lineStarts.add(offset - column + 1);
|
| + void set preserveComments(bool preserveComments) {
|
| + this._preserveComments = preserveComments;
|
| }
|
|
|
| /**
|
| - * Scan the source code to produce a list of tokens representing the source.
|
| + * Return the last token that was scanned.
|
| *
|
| - * @return the first token in the list of tokens that were produced
|
| + * @return the last token that was scanned
|
| */
|
| - Token tokenize() {
|
| - InstrumentationBuilder instrumentation = Instrumentation.builder2("dart.engine.AbstractScanner.tokenize");
|
| - int tokenCounter = 0;
|
| - try {
|
| - int next = _reader.advance();
|
| - while (next != -1) {
|
| - tokenCounter++;
|
| - next = bigSwitch(next);
|
| - }
|
| - _appendEofToken();
|
| - instrumentation.metric2("tokensCount", tokenCounter);
|
| - return firstToken;
|
| - } finally {
|
| - instrumentation.log2(2);
|
| - //Log if over 1ms
|
| - }
|
| - }
|
| + Token get tail => _tail;
|
|
|
| /**
|
| * Append the given token to the end of the token stream being scanned. This method is intended to
|
| @@ -1057,7 +1079,9 @@ class Scanner {
|
| return _reader.advance();
|
| }
|
| if (next == 0x5D) {
|
| - _appendEndToken(TokenType.CLOSE_SQUARE_BRACKET, TokenType.OPEN_SQUARE_BRACKET);
|
| + _appendEndToken(
|
| + TokenType.CLOSE_SQUARE_BRACKET,
|
| + TokenType.OPEN_SQUARE_BRACKET);
|
| return _reader.advance();
|
| }
|
| if (next == 0x60) {
|
| @@ -1069,7 +1093,9 @@ class Scanner {
|
| return _reader.advance();
|
| }
|
| if (next == 0x7D) {
|
| - _appendEndToken(TokenType.CLOSE_CURLY_BRACKET, TokenType.OPEN_CURLY_BRACKET);
|
| + _appendEndToken(
|
| + TokenType.CLOSE_CURLY_BRACKET,
|
| + TokenType.OPEN_CURLY_BRACKET);
|
| return _reader.advance();
|
| }
|
| if (next == 0x2F) {
|
| @@ -1099,24 +1125,56 @@ class Scanner {
|
| }
|
|
|
| /**
|
| - * Return the first token in the token stream that was scanned.
|
| - *
|
| - * @return the first token in the token stream that was scanned
|
| + * Record the fact that we are at the beginning of a new line in the source.
|
| */
|
| - Token get firstToken => _tokens.next;
|
| + void recordStartOfLine() {
|
| + _lineStarts.add(_reader.offset);
|
| + }
|
|
|
| /**
|
| - * Return the last token that was scanned.
|
| + * Record that the source begins on the given line and column at the current offset as given by
|
| + * the reader. The line starts for lines before the given line will not be correct.
|
| *
|
| - * @return the last token that was scanned
|
| + * This method must be invoked at most one time and must be invoked before scanning begins. The
|
| + * values provided must be sensible. The results are undefined if these conditions are violated.
|
| + *
|
| + * @param line the one-based index of the line containing the first character of the source
|
| + * @param column the one-based index of the column in which the first character of the source
|
| + * occurs
|
| */
|
| - Token get tail => _tail;
|
| + void setSourceStart(int line, int column) {
|
| + int offset = _reader.offset;
|
| + if (line < 1 || column < 1 || offset < 0 || (line + column - 2) >= offset) {
|
| + return;
|
| + }
|
| + for (int i = 2; i < line; i++) {
|
| + _lineStarts.add(1);
|
| + }
|
| + _lineStarts.add(offset - column + 1);
|
| + }
|
|
|
| /**
|
| - * Record the fact that we are at the beginning of a new line in the source.
|
| + * Scan the source code to produce a list of tokens representing the source.
|
| + *
|
| + * @return the first token in the list of tokens that were produced
|
| */
|
| - void recordStartOfLine() {
|
| - _lineStarts.add(_reader.offset);
|
| + Token tokenize() {
|
| + InstrumentationBuilder instrumentation =
|
| + Instrumentation.builder2("dart.engine.AbstractScanner.tokenize");
|
| + int tokenCounter = 0;
|
| + try {
|
| + int next = _reader.advance();
|
| + while (next != -1) {
|
| + tokenCounter++;
|
| + next = bigSwitch(next);
|
| + }
|
| + _appendEofToken();
|
| + instrumentation.metric2("tokensCount", tokenCounter);
|
| + return firstToken;
|
| + } finally {
|
| + instrumentation.log2(2);
|
| + // Log if over 1ms.
|
| + }
|
| }
|
|
|
| void _appendBeginToken(TokenType type) {
|
| @@ -1143,7 +1201,8 @@ class Scanner {
|
| _firstComment = new StringToken(type, value, _tokenStart);
|
| _lastComment = _firstComment;
|
| } else {
|
| - _lastComment = _lastComment.setNext(new StringToken(type, value, _tokenStart));
|
| + _lastComment =
|
| + _lastComment.setNext(new StringToken(type, value, _tokenStart));
|
| }
|
| }
|
|
|
| @@ -1171,11 +1230,13 @@ class Scanner {
|
| if (_firstComment == null) {
|
| eofToken = new Token(TokenType.EOF, _reader.offset + 1);
|
| } else {
|
| - eofToken = new TokenWithComment(TokenType.EOF, _reader.offset + 1, _firstComment);
|
| + eofToken =
|
| + new TokenWithComment(TokenType.EOF, _reader.offset + 1, _firstComment);
|
| _firstComment = null;
|
| _lastComment = null;
|
| }
|
| - // The EOF token points to itself so that there is always infinite look-ahead.
|
| + // The EOF token points to itself so that there is always infinite
|
| + // look-ahead.
|
| eofToken.setNext(eofToken);
|
| _tail = _tail.setNext(eofToken);
|
| if (_stackEnd >= 0) {
|
| @@ -1188,7 +1249,8 @@ class Scanner {
|
| if (_firstComment == null) {
|
| _tail = _tail.setNext(new KeywordToken(keyword, _tokenStart));
|
| } else {
|
| - _tail = _tail.setNext(new KeywordTokenWithComment(keyword, _tokenStart, _firstComment));
|
| + _tail = _tail.setNext(
|
| + new KeywordTokenWithComment(keyword, _tokenStart, _firstComment));
|
| _firstComment = null;
|
| _lastComment = null;
|
| }
|
| @@ -1198,7 +1260,8 @@ class Scanner {
|
| if (_firstComment == null) {
|
| _tail = _tail.setNext(new StringToken(type, value, _tokenStart));
|
| } else {
|
| - _tail = _tail.setNext(new StringTokenWithComment(type, value, _tokenStart, _firstComment));
|
| + _tail = _tail.setNext(
|
| + new StringTokenWithComment(type, value, _tokenStart, _firstComment));
|
| _firstComment = null;
|
| _lastComment = null;
|
| }
|
| @@ -1208,7 +1271,8 @@ class Scanner {
|
| if (_firstComment == null) {
|
| _tail = _tail.setNext(new StringToken(type, value, _tokenStart + offset));
|
| } else {
|
| - _tail = _tail.setNext(new StringTokenWithComment(type, value, _tokenStart + offset, _firstComment));
|
| + _tail = _tail.setNext(
|
| + new StringTokenWithComment(type, value, _tokenStart + offset, _firstComment));
|
| _firstComment = null;
|
| _lastComment = null;
|
| }
|
| @@ -1218,7 +1282,8 @@ class Scanner {
|
| if (_firstComment == null) {
|
| _tail = _tail.setNext(new Token(type, _tokenStart));
|
| } else {
|
| - _tail = _tail.setNext(new TokenWithComment(type, _tokenStart, _firstComment));
|
| + _tail =
|
| + _tail.setNext(new TokenWithComment(type, _tokenStart, _firstComment));
|
| _firstComment = null;
|
| _lastComment = null;
|
| }
|
| @@ -1248,15 +1313,17 @@ class Scanner {
|
| BeginToken _findTokenMatchingClosingBraceInInterpolationExpression() {
|
| while (_stackEnd >= 0) {
|
| BeginToken begin = _groupingStack[_stackEnd];
|
| - if (begin.type == TokenType.OPEN_CURLY_BRACKET || begin.type == TokenType.STRING_INTERPOLATION_EXPRESSION) {
|
| + if (begin.type == TokenType.OPEN_CURLY_BRACKET ||
|
| + begin.type == TokenType.STRING_INTERPOLATION_EXPRESSION) {
|
| return begin;
|
| }
|
| _hasUnmatchedGroups = true;
|
| _groupingStack.removeAt(_stackEnd--);
|
| }
|
| //
|
| - // We should never get to this point because we wouldn't be inside a string interpolation
|
| - // expression unless we had previously found the start of the expression.
|
| + // We should never get to this point because we wouldn't be inside a string
|
| + // interpolation expression unless we had previously found the start of the
|
| + // expression.
|
| //
|
| return null;
|
| }
|
| @@ -1268,7 +1335,8 @@ class Scanner {
|
| * @param arguments any arguments needed to complete the error message
|
| */
|
| void _reportError(ScannerErrorCode errorCode, [List<Object> arguments]) {
|
| - _errorListener.onError(new AnalysisError.con2(source, _reader.offset, 1, errorCode, arguments));
|
| + _errorListener.onError(
|
| + new AnalysisError.con2(source, _reader.offset, 1, errorCode, arguments));
|
| }
|
|
|
| int _select(int choice, TokenType yesType, TokenType noType) {
|
| @@ -1282,7 +1350,8 @@ class Scanner {
|
| }
|
| }
|
|
|
| - int _selectWithOffset(int choice, TokenType yesType, TokenType noType, int offset) {
|
| + int _selectWithOffset(int choice, TokenType yesType, TokenType noType,
|
| + int offset) {
|
| int next = _reader.advance();
|
| if (next == choice) {
|
| _appendTokenOfTypeWithOffset(yesType, offset);
|
| @@ -1323,7 +1392,8 @@ class Scanner {
|
| }
|
| }
|
|
|
| - int _tokenizeCaret(int next) => _select(0x3D, TokenType.CARET_EQ, TokenType.CARET);
|
| + int _tokenizeCaret(int next) =>
|
| + _select(0x3D, TokenType.CARET_EQ, TokenType.CARET);
|
|
|
| int _tokenizeDotOrNumber(int next) {
|
| int start = _reader.offset;
|
| @@ -1331,7 +1401,10 @@ class Scanner {
|
| if (0x30 <= next && next <= 0x39) {
|
| return _tokenizeFractionPart(next, start);
|
| } else if (0x2E == next) {
|
| - return _select(0x2E, TokenType.PERIOD_PERIOD_PERIOD, TokenType.PERIOD_PERIOD);
|
| + return _select(
|
| + 0x2E,
|
| + TokenType.PERIOD_PERIOD_PERIOD,
|
| + TokenType.PERIOD_PERIOD);
|
| } else {
|
| _appendTokenOfType(TokenType.PERIOD);
|
| return next;
|
| @@ -1401,12 +1474,18 @@ class Scanner {
|
| if (!hasDigit) {
|
| _appendStringToken(TokenType.INT, _reader.getString(start, -2));
|
| if (0x2E == next) {
|
| - return _selectWithOffset(0x2E, TokenType.PERIOD_PERIOD_PERIOD, TokenType.PERIOD_PERIOD, _reader.offset - 1);
|
| + return _selectWithOffset(
|
| + 0x2E,
|
| + TokenType.PERIOD_PERIOD_PERIOD,
|
| + TokenType.PERIOD_PERIOD,
|
| + _reader.offset - 1);
|
| }
|
| _appendTokenOfTypeWithOffset(TokenType.PERIOD, _reader.offset - 1);
|
| return bigSwitch(next);
|
| }
|
| - _appendStringToken(TokenType.DOUBLE, _reader.getString(start, next < 0 ? 0 : -1));
|
| + _appendStringToken(
|
| + TokenType.DOUBLE,
|
| + _reader.getString(start, next < 0 ? 0 : -1));
|
| return next;
|
| }
|
|
|
| @@ -1436,13 +1515,17 @@ class Scanner {
|
| bool hasDigits = false;
|
| while (true) {
|
| next = _reader.advance();
|
| - if ((0x30 <= next && next <= 0x39) || (0x41 <= next && next <= 0x46) || (0x61 <= next && next <= 0x66)) {
|
| + if ((0x30 <= next && next <= 0x39) ||
|
| + (0x41 <= next && next <= 0x46) ||
|
| + (0x61 <= next && next <= 0x66)) {
|
| hasDigits = true;
|
| } else {
|
| if (!hasDigits) {
|
| _reportError(ScannerErrorCode.MISSING_HEX_DIGIT);
|
| }
|
| - _appendStringToken(TokenType.HEXADECIMAL, _reader.getString(start, next < 0 ? 0 : -1));
|
| + _appendStringToken(
|
| + TokenType.HEXADECIMAL,
|
| + _reader.getString(start, next < 0 ? 0 : -1));
|
| return next;
|
| }
|
| }
|
| @@ -1458,10 +1541,16 @@ class Scanner {
|
| }
|
|
|
| int _tokenizeIdentifier(int next, int start, bool allowDollar) {
|
| - while ((0x61 <= next && next <= 0x7A) || (0x41 <= next && next <= 0x5A) || (0x30 <= next && next <= 0x39) || next == 0x5F || (next == 0x24 && allowDollar)) {
|
| + while ((0x61 <= next && next <= 0x7A) ||
|
| + (0x41 <= next && next <= 0x5A) ||
|
| + (0x30 <= next && next <= 0x39) ||
|
| + next == 0x5F ||
|
| + (next == 0x24 && allowDollar)) {
|
| next = _reader.advance();
|
| }
|
| - _appendStringToken(TokenType.IDENTIFIER, _reader.getString(start, next < 0 ? 0 : -1));
|
| + _appendStringToken(
|
| + TokenType.IDENTIFIER,
|
| + _reader.getString(start, next < 0 ? 0 : -1));
|
| return next;
|
| }
|
|
|
| @@ -1470,7 +1559,8 @@ class Scanner {
|
| next = _reader.advance();
|
| while (next != -1) {
|
| if (next == 0x7D) {
|
| - BeginToken begin = _findTokenMatchingClosingBraceInInterpolationExpression();
|
| + BeginToken begin =
|
| + _findTokenMatchingClosingBraceInInterpolationExpression();
|
| if (begin == null) {
|
| _beginToken();
|
| _appendTokenOfType(TokenType.CLOSE_CURLY_BRACKET);
|
| @@ -1479,12 +1569,16 @@ class Scanner {
|
| return next;
|
| } else if (begin.type == TokenType.OPEN_CURLY_BRACKET) {
|
| _beginToken();
|
| - _appendEndToken(TokenType.CLOSE_CURLY_BRACKET, TokenType.OPEN_CURLY_BRACKET);
|
| + _appendEndToken(
|
| + TokenType.CLOSE_CURLY_BRACKET,
|
| + TokenType.OPEN_CURLY_BRACKET);
|
| next = _reader.advance();
|
| _beginToken();
|
| } else if (begin.type == TokenType.STRING_INTERPOLATION_EXPRESSION) {
|
| _beginToken();
|
| - _appendEndToken(TokenType.CLOSE_CURLY_BRACKET, TokenType.STRING_INTERPOLATION_EXPRESSION);
|
| + _appendEndToken(
|
| + TokenType.CLOSE_CURLY_BRACKET,
|
| + TokenType.STRING_INTERPOLATION_EXPRESSION);
|
| next = _reader.advance();
|
| _beginToken();
|
| return next;
|
| @@ -1497,8 +1591,13 @@ class Scanner {
|
| }
|
|
|
| int _tokenizeInterpolatedIdentifier(int next, int start) {
|
| - _appendStringTokenWithOffset(TokenType.STRING_INTERPOLATION_IDENTIFIER, "\$", 0);
|
| - if ((0x41 <= next && next <= 0x5A) || (0x61 <= next && next <= 0x7A) || next == 0x5F) {
|
| + _appendStringTokenWithOffset(
|
| + TokenType.STRING_INTERPOLATION_IDENTIFIER,
|
| + "\$",
|
| + 0);
|
| + if ((0x41 <= next && next <= 0x5A) ||
|
| + (0x61 <= next && next <= 0x7A) ||
|
| + next == 0x5F) {
|
| _beginToken();
|
| next = _tokenizeKeywordOrIdentifier(next, false);
|
| }
|
| @@ -1516,7 +1615,10 @@ class Scanner {
|
| if (state == null || state.keyword() == null) {
|
| return _tokenizeIdentifier(next, start, allowDollar);
|
| }
|
| - if ((0x41 <= next && next <= 0x5A) || (0x30 <= next && next <= 0x39) || next == 0x5F || next == 0x24) {
|
| + if ((0x41 <= next && next <= 0x5A) ||
|
| + (0x30 <= next && next <= 0x39) ||
|
| + next == 0x5F ||
|
| + next == 0x24) {
|
| return _tokenizeIdentifier(next, start, allowDollar);
|
| } else if (next < 128) {
|
| _appendKeywordToken(state.keyword());
|
| @@ -1561,14 +1663,18 @@ class Scanner {
|
| while (true) {
|
| if (-1 == next) {
|
| _reportError(ScannerErrorCode.UNTERMINATED_MULTI_LINE_COMMENT);
|
| - _appendCommentToken(TokenType.MULTI_LINE_COMMENT, _reader.getString(_tokenStart, 0));
|
| + _appendCommentToken(
|
| + TokenType.MULTI_LINE_COMMENT,
|
| + _reader.getString(_tokenStart, 0));
|
| return next;
|
| } else if (0x2A == next) {
|
| next = _reader.advance();
|
| if (0x2F == next) {
|
| --nesting;
|
| if (0 == nesting) {
|
| - _appendCommentToken(TokenType.MULTI_LINE_COMMENT, _reader.getString(_tokenStart, 0));
|
| + _appendCommentToken(
|
| + TokenType.MULTI_LINE_COMMENT,
|
| + _reader.getString(_tokenStart, 0));
|
| return _reader.advance();
|
| } else {
|
| next = _reader.advance();
|
| @@ -1690,7 +1796,8 @@ class Scanner {
|
| return _reader.advance();
|
| }
|
|
|
| - int _tokenizeMultiply(int next) => _select(0x3D, TokenType.STAR_EQ, TokenType.STAR);
|
| + int _tokenizeMultiply(int next) =>
|
| + _select(0x3D, TokenType.STAR_EQ, TokenType.STAR);
|
|
|
| int _tokenizeNumber(int next) {
|
| int start = _reader.offset;
|
| @@ -1703,7 +1810,9 @@ class Scanner {
|
| } else if (next == 0x65 || next == 0x45) {
|
| return _tokenizeFractionPart(next, start);
|
| } else {
|
| - _appendStringToken(TokenType.INT, _reader.getString(start, next < 0 ? 0 : -1));
|
| + _appendStringToken(
|
| + TokenType.INT,
|
| + _reader.getString(start, next < 0 ? 0 : -1));
|
| return next;
|
| }
|
| }
|
| @@ -1720,7 +1829,8 @@ class Scanner {
|
| }
|
| }
|
|
|
| - int _tokenizePercent(int next) => _select(0x3D, TokenType.PERCENT_EQ, TokenType.PERCENT);
|
| + int _tokenizePercent(int next) =>
|
| + _select(0x3D, TokenType.PERCENT_EQ, TokenType.PERCENT);
|
|
|
| int _tokenizePlus(int next) {
|
| // + ++ +=
|
| @@ -1741,10 +1851,14 @@ class Scanner {
|
| while (true) {
|
| next = _reader.advance();
|
| if (-1 == next) {
|
| - _appendCommentToken(TokenType.SINGLE_LINE_COMMENT, _reader.getString(_tokenStart, 0));
|
| + _appendCommentToken(
|
| + TokenType.SINGLE_LINE_COMMENT,
|
| + _reader.getString(_tokenStart, 0));
|
| return next;
|
| } else if (0xA == next || 0xD == next) {
|
| - _appendCommentToken(TokenType.SINGLE_LINE_COMMENT, _reader.getString(_tokenStart, -1));
|
| + _appendCommentToken(
|
| + TokenType.SINGLE_LINE_COMMENT,
|
| + _reader.getString(_tokenStart, -1));
|
| return next;
|
| }
|
| }
|
| @@ -1849,7 +1963,9 @@ class Scanner {
|
| do {
|
| next = _reader.advance();
|
| } while (next != 0xA && next != 0xD && next > 0);
|
| - _appendStringToken(TokenType.SCRIPT_TAG, _reader.getString(_tokenStart, 0));
|
| + _appendStringToken(
|
| + TokenType.SCRIPT_TAG,
|
| + _reader.getString(_tokenStart, 0));
|
| return next;
|
| }
|
| }
|
| @@ -1874,27 +1990,25 @@ class Scanner {
|
| * detected by the scanner.
|
| */
|
| class ScannerErrorCode extends ErrorCode {
|
| - static const ScannerErrorCode ILLEGAL_CHARACTER
|
| - = const ScannerErrorCode('ILLEGAL_CHARACTER', "Illegal character {0}");
|
| + static const ScannerErrorCode ILLEGAL_CHARACTER =
|
| + const ScannerErrorCode('ILLEGAL_CHARACTER', "Illegal character {0}");
|
|
|
| - static const ScannerErrorCode MISSING_DIGIT
|
| - = const ScannerErrorCode('MISSING_DIGIT', "Decimal digit expected");
|
| + static const ScannerErrorCode MISSING_DIGIT =
|
| + const ScannerErrorCode('MISSING_DIGIT', "Decimal digit expected");
|
|
|
| - static const ScannerErrorCode MISSING_HEX_DIGIT
|
| - = const ScannerErrorCode(
|
| - 'MISSING_HEX_DIGIT',
|
| - "Hexidecimal digit expected");
|
| + static const ScannerErrorCode MISSING_HEX_DIGIT =
|
| + const ScannerErrorCode('MISSING_HEX_DIGIT', "Hexidecimal digit expected");
|
|
|
| - static const ScannerErrorCode MISSING_QUOTE
|
| - = const ScannerErrorCode('MISSING_QUOTE', "Expected quote (' or \")");
|
| + static const ScannerErrorCode MISSING_QUOTE =
|
| + const ScannerErrorCode('MISSING_QUOTE', "Expected quote (' or \")");
|
|
|
| - static const ScannerErrorCode UNTERMINATED_MULTI_LINE_COMMENT
|
| - = const ScannerErrorCode(
|
| + static const ScannerErrorCode UNTERMINATED_MULTI_LINE_COMMENT =
|
| + const ScannerErrorCode(
|
| 'UNTERMINATED_MULTI_LINE_COMMENT',
|
| "Unterminated multi-line comment");
|
|
|
| - static const ScannerErrorCode UNTERMINATED_STRING_LITERAL
|
| - = const ScannerErrorCode(
|
| + static const ScannerErrorCode UNTERMINATED_STRING_LITERAL =
|
| + const ScannerErrorCode(
|
| 'UNTERMINATED_STRING_LITERAL',
|
| "Unterminated string literal");
|
|
|
| @@ -1936,10 +2050,10 @@ class StringToken extends Token {
|
| }
|
|
|
| @override
|
| - Token copy() => new StringToken(type, _value, offset);
|
| + String get lexeme => _value;
|
|
|
| @override
|
| - String get lexeme => _value;
|
| + Token copy() => new StringToken(type, _value, offset);
|
|
|
| @override
|
| String value() => _value;
|
| @@ -1963,10 +2077,9 @@ class StringTokenWithComment extends StringToken {
|
| * @param offset the offset from the beginning of the file to the first character in the token
|
| * @param precedingComment the first comment in the list of comments that precede this token
|
| */
|
| - StringTokenWithComment(TokenType type, String value, int offset, this._precedingComment) : super(type, value, offset);
|
| -
|
| - @override
|
| - Token copy() => new StringTokenWithComment(type, lexeme, offset, copyComments(_precedingComment));
|
| + StringTokenWithComment(TokenType type, String value, int offset,
|
| + this._precedingComment)
|
| + : super(type, value, offset);
|
|
|
| @override
|
| Token get precedingComments => _precedingComment;
|
| @@ -1980,6 +2093,14 @@ class StringTokenWithComment extends StringToken {
|
| token = token.next;
|
| }
|
| }
|
| +
|
| + @override
|
| + Token copy() =>
|
| + new StringTokenWithComment(
|
| + type,
|
| + lexeme,
|
| + offset,
|
| + copyComments(_precedingComment));
|
| }
|
|
|
| /**
|
| @@ -2006,12 +2127,13 @@ class SubSequenceReader extends CharSequenceReader {
|
| int get offset => _offsetDelta + super.offset;
|
|
|
| @override
|
| - String getString(int start, int endDelta) => super.getString(start - _offsetDelta, endDelta);
|
| -
|
| - @override
|
| void set offset(int offset) {
|
| super.offset = offset - _offsetDelta;
|
| }
|
| +
|
| + @override
|
| + String getString(int start, int endDelta) =>
|
| + super.getString(start - _offsetDelta, endDelta);
|
| }
|
|
|
| /**
|
| @@ -2025,7 +2147,8 @@ class SyntheticStringToken extends StringToken {
|
| * @param value the lexeme represented by this token
|
| * @param offset the offset from the beginning of the file to the first character in the token
|
| */
|
| - SyntheticStringToken(TokenType type, String value, int offset) : super(type, value, offset);
|
| + SyntheticStringToken(TokenType type, String value, int offset)
|
| + : super(type, value, offset);
|
|
|
| @override
|
| bool get isSynthetic => true;
|
| @@ -2057,26 +2180,6 @@ class Token {
|
| Token _next;
|
|
|
| /**
|
| - * Compare the given [tokens] to find the token that appears first in the
|
| - * source being parsed. That is, return the left-most of all of the tokens.
|
| - * The list must be non-`null`, but the elements of the list are allowed to be
|
| - * `null`. Return the token with the smallest offset, or `null` if there are
|
| - * no tokens or if all of the tokens are `null`.
|
| - */
|
| - static Token lexicallyFirst(List<Token> tokens) {
|
| - Token first = null;
|
| - int offset = -1;
|
| - for (Token token in tokens) {
|
| - if (token != null && (offset < 0 || token.offset < offset)) {
|
| - first = token;
|
| - offset = token.offset;
|
| - }
|
| - }
|
| - return first;
|
| - }
|
| -
|
| -
|
| - /**
|
| * Initialize a newly created token to have the given type and offset.
|
| *
|
| * @param type the type of the token
|
| @@ -2086,13 +2189,6 @@ class Token {
|
| this.offset = offset;
|
| }
|
|
|
| - /**
|
| - * Return a newly created token that is a copy of this token but that is not a part of any token
|
| - * stream.
|
| - *
|
| - * @return a newly created token that is a copy of this token
|
| - */
|
| - Token copy() => new Token(type, offset);
|
|
|
| /**
|
| * Return the offset from the beginning of the file to the character after last character of the
|
| @@ -2104,6 +2200,28 @@ class Token {
|
| int get end => offset + length;
|
|
|
| /**
|
| + * Return `true` if this token represents an operator.
|
| + *
|
| + * @return `true` if this token represents an operator
|
| + */
|
| + bool get isOperator => type.isOperator;
|
| +
|
| + /**
|
| + * Return `true` if this token is a synthetic token. A synthetic token is a token that was
|
| + * introduced by the parser in order to recover from an error in the code.
|
| + *
|
| + * @return `true` if this token is a synthetic token
|
| + */
|
| + bool get isSynthetic => length == 0;
|
| +
|
| + /**
|
| + * Return `true` if this token represents an operator that can be defined by users.
|
| + *
|
| + * @return `true` if this token represents an operator that can be defined by users
|
| + */
|
| + bool get isUserDefinableOperator => type.isUserDefinableOperator;
|
| +
|
| + /**
|
| * Return the number of characters in the node's source range.
|
| *
|
| * @return the number of characters in the node's source range
|
| @@ -2134,26 +2252,41 @@ class Token {
|
| Token get precedingComments => null;
|
|
|
| /**
|
| - * Return `true` if this token represents an operator.
|
| + * Apply (add) the given delta to this token's offset.
|
| *
|
| - * @return `true` if this token represents an operator
|
| + * @param delta the amount by which the offset is to be adjusted
|
| */
|
| - bool get isOperator => type.isOperator;
|
| + void applyDelta(int delta) {
|
| + offset += delta;
|
| + }
|
|
|
| /**
|
| - * Return `true` if this token is a synthetic token. A synthetic token is a token that was
|
| - * introduced by the parser in order to recover from an error in the code.
|
| + * Return a newly created token that is a copy of this token but that is not a part of any token
|
| + * stream.
|
| *
|
| - * @return `true` if this token is a synthetic token
|
| + * @return a newly created token that is a copy of this token
|
| */
|
| - bool get isSynthetic => length == 0;
|
| + Token copy() => new Token(type, offset);
|
|
|
| /**
|
| - * Return `true` if this token represents an operator that can be defined by users.
|
| + * Copy a linked list of comment tokens identical to the given comment tokens.
|
| *
|
| - * @return `true` if this token represents an operator that can be defined by users
|
| + * @param token the first token in the list, or `null` if there are no tokens to be copied
|
| + * @return the tokens that were created
|
| */
|
| - bool get isUserDefinableOperator => type.isUserDefinableOperator;
|
| + Token copyComments(Token token) {
|
| + if (token == null) {
|
| + return null;
|
| + }
|
| + Token head = token.copy();
|
| + Token tail = head;
|
| + token = token.next;
|
| + while (token != null) {
|
| + tail = tail.setNext(token.copy());
|
| + token = token.next;
|
| + }
|
| + return head;
|
| + }
|
|
|
| /**
|
| * Return `true` if this token has any one of the given types.
|
| @@ -2207,32 +2340,22 @@ class Token {
|
| Object value() => type.lexeme;
|
|
|
| /**
|
| - * Apply (add) the given delta to this token's offset.
|
| - *
|
| - * @param delta the amount by which the offset is to be adjusted
|
| - */
|
| - void applyDelta(int delta) {
|
| - offset += delta;
|
| - }
|
| -
|
| - /**
|
| - * Copy a linked list of comment tokens identical to the given comment tokens.
|
| - *
|
| - * @param token the first token in the list, or `null` if there are no tokens to be copied
|
| - * @return the tokens that were created
|
| + * Compare the given [tokens] to find the token that appears first in the
|
| + * source being parsed. That is, return the left-most of all of the tokens.
|
| + * The list must be non-`null`, but the elements of the list are allowed to be
|
| + * `null`. Return the token with the smallest offset, or `null` if there are
|
| + * no tokens or if all of the tokens are `null`.
|
| */
|
| - Token copyComments(Token token) {
|
| - if (token == null) {
|
| - return null;
|
| - }
|
| - Token head = token.copy();
|
| - Token tail = head;
|
| - token = token.next;
|
| - while (token != null) {
|
| - tail = tail.setNext(token.copy());
|
| - token = token.next;
|
| + static Token lexicallyFirst(List<Token> tokens) {
|
| + Token first = null;
|
| + int offset = -1;
|
| + for (Token token in tokens) {
|
| + if (token != null && (offset < 0 || token.offset < offset)) {
|
| + first = token;
|
| + offset = token.offset;
|
| + }
|
| }
|
| - return head;
|
| + return first;
|
| }
|
| }
|
|
|
| @@ -2248,77 +2371,92 @@ class TokenClass extends Enum<TokenClass> {
|
| /**
|
| * A value used to indicate that the token type is an additive operator.
|
| */
|
| - static const TokenClass ADDITIVE_OPERATOR = const TokenClass.con2('ADDITIVE_OPERATOR', 1, 12);
|
| + static const TokenClass ADDITIVE_OPERATOR =
|
| + const TokenClass.con2('ADDITIVE_OPERATOR', 1, 12);
|
|
|
| /**
|
| * A value used to indicate that the token type is an assignment operator.
|
| */
|
| - static const TokenClass ASSIGNMENT_OPERATOR = const TokenClass.con2('ASSIGNMENT_OPERATOR', 2, 1);
|
| + static const TokenClass ASSIGNMENT_OPERATOR =
|
| + const TokenClass.con2('ASSIGNMENT_OPERATOR', 2, 1);
|
|
|
| /**
|
| * A value used to indicate that the token type is a bitwise-and operator.
|
| */
|
| - static const TokenClass BITWISE_AND_OPERATOR = const TokenClass.con2('BITWISE_AND_OPERATOR', 3, 10);
|
| + static const TokenClass BITWISE_AND_OPERATOR =
|
| + const TokenClass.con2('BITWISE_AND_OPERATOR', 3, 10);
|
|
|
| /**
|
| * A value used to indicate that the token type is a bitwise-or operator.
|
| */
|
| - static const TokenClass BITWISE_OR_OPERATOR = const TokenClass.con2('BITWISE_OR_OPERATOR', 4, 8);
|
| + static const TokenClass BITWISE_OR_OPERATOR =
|
| + const TokenClass.con2('BITWISE_OR_OPERATOR', 4, 8);
|
|
|
| /**
|
| * A value used to indicate that the token type is a bitwise-xor operator.
|
| */
|
| - static const TokenClass BITWISE_XOR_OPERATOR = const TokenClass.con2('BITWISE_XOR_OPERATOR', 5, 9);
|
| + static const TokenClass BITWISE_XOR_OPERATOR =
|
| + const TokenClass.con2('BITWISE_XOR_OPERATOR', 5, 9);
|
|
|
| /**
|
| * A value used to indicate that the token type is a cascade operator.
|
| */
|
| - static const TokenClass CASCADE_OPERATOR = const TokenClass.con2('CASCADE_OPERATOR', 6, 2);
|
| + static const TokenClass CASCADE_OPERATOR =
|
| + const TokenClass.con2('CASCADE_OPERATOR', 6, 2);
|
|
|
| /**
|
| * A value used to indicate that the token type is a conditional operator.
|
| */
|
| - static const TokenClass CONDITIONAL_OPERATOR = const TokenClass.con2('CONDITIONAL_OPERATOR', 7, 3);
|
| + static const TokenClass CONDITIONAL_OPERATOR =
|
| + const TokenClass.con2('CONDITIONAL_OPERATOR', 7, 3);
|
|
|
| /**
|
| * A value used to indicate that the token type is an equality operator.
|
| */
|
| - static const TokenClass EQUALITY_OPERATOR = const TokenClass.con2('EQUALITY_OPERATOR', 8, 6);
|
| + static const TokenClass EQUALITY_OPERATOR =
|
| + const TokenClass.con2('EQUALITY_OPERATOR', 8, 6);
|
|
|
| /**
|
| * A value used to indicate that the token type is a logical-and operator.
|
| */
|
| - static const TokenClass LOGICAL_AND_OPERATOR = const TokenClass.con2('LOGICAL_AND_OPERATOR', 9, 5);
|
| + static const TokenClass LOGICAL_AND_OPERATOR =
|
| + const TokenClass.con2('LOGICAL_AND_OPERATOR', 9, 5);
|
|
|
| /**
|
| * A value used to indicate that the token type is a logical-or operator.
|
| */
|
| - static const TokenClass LOGICAL_OR_OPERATOR = const TokenClass.con2('LOGICAL_OR_OPERATOR', 10, 4);
|
| + static const TokenClass LOGICAL_OR_OPERATOR =
|
| + const TokenClass.con2('LOGICAL_OR_OPERATOR', 10, 4);
|
|
|
| /**
|
| * A value used to indicate that the token type is a multiplicative operator.
|
| */
|
| - static const TokenClass MULTIPLICATIVE_OPERATOR = const TokenClass.con2('MULTIPLICATIVE_OPERATOR', 11, 13);
|
| + static const TokenClass MULTIPLICATIVE_OPERATOR =
|
| + const TokenClass.con2('MULTIPLICATIVE_OPERATOR', 11, 13);
|
|
|
| /**
|
| * A value used to indicate that the token type is a relational operator.
|
| */
|
| - static const TokenClass RELATIONAL_OPERATOR = const TokenClass.con2('RELATIONAL_OPERATOR', 12, 7);
|
| + static const TokenClass RELATIONAL_OPERATOR =
|
| + const TokenClass.con2('RELATIONAL_OPERATOR', 12, 7);
|
|
|
| /**
|
| * A value used to indicate that the token type is a shift operator.
|
| */
|
| - static const TokenClass SHIFT_OPERATOR = const TokenClass.con2('SHIFT_OPERATOR', 13, 11);
|
| + static const TokenClass SHIFT_OPERATOR =
|
| + const TokenClass.con2('SHIFT_OPERATOR', 13, 11);
|
|
|
| /**
|
| * A value used to indicate that the token type is a unary operator.
|
| */
|
| - static const TokenClass UNARY_POSTFIX_OPERATOR = const TokenClass.con2('UNARY_POSTFIX_OPERATOR', 14, 15);
|
| + static const TokenClass UNARY_POSTFIX_OPERATOR =
|
| + const TokenClass.con2('UNARY_POSTFIX_OPERATOR', 14, 15);
|
|
|
| /**
|
| * A value used to indicate that the token type is a unary operator.
|
| */
|
| - static const TokenClass UNARY_PREFIX_OPERATOR = const TokenClass.con2('UNARY_PREFIX_OPERATOR', 15, 14);
|
| + static const TokenClass UNARY_PREFIX_OPERATOR =
|
| + const TokenClass.con2('UNARY_PREFIX_OPERATOR', 15, 14);
|
|
|
| static const List<TokenClass> values = const [
|
| NO_CLASS,
|
| @@ -2346,7 +2484,8 @@ class TokenClass extends Enum<TokenClass> {
|
|
|
| const TokenClass.con1(String name, int ordinal) : this.con2(name, ordinal, 0);
|
|
|
| - const TokenClass.con2(String name, int ordinal, this.precedence) : super(name, ordinal);
|
| + const TokenClass.con2(String name, int ordinal, this.precedence)
|
| + : super(name, ordinal);
|
| }
|
|
|
| /**
|
| @@ -2357,7 +2496,8 @@ class TokenType extends Enum<TokenType> {
|
| /**
|
| * The type of the token that marks the end of the input.
|
| */
|
| - static const TokenType EOF = const TokenType_EOF('EOF', 0, TokenClass.NO_CLASS, "");
|
| + static const TokenType EOF =
|
| + const TokenType_EOF('EOF', 0, TokenClass.NO_CLASS, "");
|
|
|
| static const TokenType DOUBLE = const TokenType.con1('DOUBLE', 1);
|
|
|
| @@ -2369,129 +2509,210 @@ class TokenType extends Enum<TokenType> {
|
|
|
| static const TokenType KEYWORD = const TokenType.con1('KEYWORD', 5);
|
|
|
| - static const TokenType MULTI_LINE_COMMENT = const TokenType.con1('MULTI_LINE_COMMENT', 6);
|
| + static const TokenType MULTI_LINE_COMMENT =
|
| + const TokenType.con1('MULTI_LINE_COMMENT', 6);
|
|
|
| static const TokenType SCRIPT_TAG = const TokenType.con1('SCRIPT_TAG', 7);
|
|
|
| - static const TokenType SINGLE_LINE_COMMENT = const TokenType.con1('SINGLE_LINE_COMMENT', 8);
|
| + static const TokenType SINGLE_LINE_COMMENT =
|
| + const TokenType.con1('SINGLE_LINE_COMMENT', 8);
|
|
|
| static const TokenType STRING = const TokenType.con1('STRING', 9);
|
|
|
| - static const TokenType AMPERSAND = const TokenType.con2('AMPERSAND', 10, TokenClass.BITWISE_AND_OPERATOR, "&");
|
| + static const TokenType AMPERSAND =
|
| + const TokenType.con2('AMPERSAND', 10, TokenClass.BITWISE_AND_OPERATOR, "&");
|
|
|
| - static const TokenType AMPERSAND_AMPERSAND = const TokenType.con2('AMPERSAND_AMPERSAND', 11, TokenClass.LOGICAL_AND_OPERATOR, "&&");
|
| + static const TokenType AMPERSAND_AMPERSAND = const TokenType.con2(
|
| + 'AMPERSAND_AMPERSAND',
|
| + 11,
|
| + TokenClass.LOGICAL_AND_OPERATOR,
|
| + "&&");
|
|
|
| - static const TokenType AMPERSAND_EQ = const TokenType.con2('AMPERSAND_EQ', 12, TokenClass.ASSIGNMENT_OPERATOR, "&=");
|
| + static const TokenType AMPERSAND_EQ =
|
| + const TokenType.con2('AMPERSAND_EQ', 12, TokenClass.ASSIGNMENT_OPERATOR, "&=");
|
|
|
| - static const TokenType AT = const TokenType.con2('AT', 13, TokenClass.NO_CLASS, "@");
|
| + static const TokenType AT =
|
| + const TokenType.con2('AT', 13, TokenClass.NO_CLASS, "@");
|
|
|
| - static const TokenType BANG = const TokenType.con2('BANG', 14, TokenClass.UNARY_PREFIX_OPERATOR, "!");
|
| + static const TokenType BANG =
|
| + const TokenType.con2('BANG', 14, TokenClass.UNARY_PREFIX_OPERATOR, "!");
|
|
|
| - static const TokenType BANG_EQ = const TokenType.con2('BANG_EQ', 15, TokenClass.EQUALITY_OPERATOR, "!=");
|
| + static const TokenType BANG_EQ =
|
| + const TokenType.con2('BANG_EQ', 15, TokenClass.EQUALITY_OPERATOR, "!=");
|
|
|
| - static const TokenType BAR = const TokenType.con2('BAR', 16, TokenClass.BITWISE_OR_OPERATOR, "|");
|
| + static const TokenType BAR =
|
| + const TokenType.con2('BAR', 16, TokenClass.BITWISE_OR_OPERATOR, "|");
|
|
|
| - static const TokenType BAR_BAR = const TokenType.con2('BAR_BAR', 17, TokenClass.LOGICAL_OR_OPERATOR, "||");
|
| + static const TokenType BAR_BAR =
|
| + const TokenType.con2('BAR_BAR', 17, TokenClass.LOGICAL_OR_OPERATOR, "||");
|
|
|
| - static const TokenType BAR_EQ = const TokenType.con2('BAR_EQ', 18, TokenClass.ASSIGNMENT_OPERATOR, "|=");
|
| + static const TokenType BAR_EQ =
|
| + const TokenType.con2('BAR_EQ', 18, TokenClass.ASSIGNMENT_OPERATOR, "|=");
|
|
|
| - static const TokenType COLON = const TokenType.con2('COLON', 19, TokenClass.NO_CLASS, ":");
|
| + static const TokenType COLON =
|
| + const TokenType.con2('COLON', 19, TokenClass.NO_CLASS, ":");
|
|
|
| - static const TokenType COMMA = const TokenType.con2('COMMA', 20, TokenClass.NO_CLASS, ",");
|
| + static const TokenType COMMA =
|
| + const TokenType.con2('COMMA', 20, TokenClass.NO_CLASS, ",");
|
|
|
| - static const TokenType CARET = const TokenType.con2('CARET', 21, TokenClass.BITWISE_XOR_OPERATOR, "^");
|
| + static const TokenType CARET =
|
| + const TokenType.con2('CARET', 21, TokenClass.BITWISE_XOR_OPERATOR, "^");
|
|
|
| - static const TokenType CARET_EQ = const TokenType.con2('CARET_EQ', 22, TokenClass.ASSIGNMENT_OPERATOR, "^=");
|
| + static const TokenType CARET_EQ =
|
| + const TokenType.con2('CARET_EQ', 22, TokenClass.ASSIGNMENT_OPERATOR, "^=");
|
|
|
| - static const TokenType CLOSE_CURLY_BRACKET = const TokenType.con2('CLOSE_CURLY_BRACKET', 23, TokenClass.NO_CLASS, "}");
|
| + static const TokenType CLOSE_CURLY_BRACKET =
|
| + const TokenType.con2('CLOSE_CURLY_BRACKET', 23, TokenClass.NO_CLASS, "}");
|
|
|
| - static const TokenType CLOSE_PAREN = const TokenType.con2('CLOSE_PAREN', 24, TokenClass.NO_CLASS, ")");
|
| + static const TokenType CLOSE_PAREN =
|
| + const TokenType.con2('CLOSE_PAREN', 24, TokenClass.NO_CLASS, ")");
|
|
|
| - static const TokenType CLOSE_SQUARE_BRACKET = const TokenType.con2('CLOSE_SQUARE_BRACKET', 25, TokenClass.NO_CLASS, "]");
|
| + static const TokenType CLOSE_SQUARE_BRACKET =
|
| + const TokenType.con2('CLOSE_SQUARE_BRACKET', 25, TokenClass.NO_CLASS, "]");
|
|
|
| - static const TokenType EQ = const TokenType.con2('EQ', 26, TokenClass.ASSIGNMENT_OPERATOR, "=");
|
| + static const TokenType EQ =
|
| + const TokenType.con2('EQ', 26, TokenClass.ASSIGNMENT_OPERATOR, "=");
|
|
|
| - static const TokenType EQ_EQ = const TokenType.con2('EQ_EQ', 27, TokenClass.EQUALITY_OPERATOR, "==");
|
| + static const TokenType EQ_EQ =
|
| + const TokenType.con2('EQ_EQ', 27, TokenClass.EQUALITY_OPERATOR, "==");
|
|
|
| - static const TokenType FUNCTION = const TokenType.con2('FUNCTION', 28, TokenClass.NO_CLASS, "=>");
|
| + static const TokenType FUNCTION =
|
| + const TokenType.con2('FUNCTION', 28, TokenClass.NO_CLASS, "=>");
|
|
|
| - static const TokenType GT = const TokenType.con2('GT', 29, TokenClass.RELATIONAL_OPERATOR, ">");
|
| + static const TokenType GT =
|
| + const TokenType.con2('GT', 29, TokenClass.RELATIONAL_OPERATOR, ">");
|
|
|
| - static const TokenType GT_EQ = const TokenType.con2('GT_EQ', 30, TokenClass.RELATIONAL_OPERATOR, ">=");
|
| + static const TokenType GT_EQ =
|
| + const TokenType.con2('GT_EQ', 30, TokenClass.RELATIONAL_OPERATOR, ">=");
|
|
|
| - static const TokenType GT_GT = const TokenType.con2('GT_GT', 31, TokenClass.SHIFT_OPERATOR, ">>");
|
| + static const TokenType GT_GT =
|
| + const TokenType.con2('GT_GT', 31, TokenClass.SHIFT_OPERATOR, ">>");
|
|
|
| - static const TokenType GT_GT_EQ = const TokenType.con2('GT_GT_EQ', 32, TokenClass.ASSIGNMENT_OPERATOR, ">>=");
|
| + static const TokenType GT_GT_EQ =
|
| + const TokenType.con2('GT_GT_EQ', 32, TokenClass.ASSIGNMENT_OPERATOR, ">>=");
|
|
|
| - static const TokenType HASH = const TokenType.con2('HASH', 33, TokenClass.NO_CLASS, "#");
|
| + static const TokenType HASH =
|
| + const TokenType.con2('HASH', 33, TokenClass.NO_CLASS, "#");
|
|
|
| - static const TokenType INDEX = const TokenType.con2('INDEX', 34, TokenClass.UNARY_POSTFIX_OPERATOR, "[]");
|
| + static const TokenType INDEX =
|
| + const TokenType.con2('INDEX', 34, TokenClass.UNARY_POSTFIX_OPERATOR, "[]");
|
|
|
| - static const TokenType INDEX_EQ = const TokenType.con2('INDEX_EQ', 35, TokenClass.UNARY_POSTFIX_OPERATOR, "[]=");
|
| + static const TokenType INDEX_EQ =
|
| + const TokenType.con2('INDEX_EQ', 35, TokenClass.UNARY_POSTFIX_OPERATOR, "[]=");
|
|
|
| - static const TokenType IS = const TokenType.con2('IS', 36, TokenClass.RELATIONAL_OPERATOR, "is");
|
| + static const TokenType IS =
|
| + const TokenType.con2('IS', 36, TokenClass.RELATIONAL_OPERATOR, "is");
|
|
|
| - static const TokenType LT = const TokenType.con2('LT', 37, TokenClass.RELATIONAL_OPERATOR, "<");
|
| + static const TokenType LT =
|
| + const TokenType.con2('LT', 37, TokenClass.RELATIONAL_OPERATOR, "<");
|
|
|
| - static const TokenType LT_EQ = const TokenType.con2('LT_EQ', 38, TokenClass.RELATIONAL_OPERATOR, "<=");
|
| + static const TokenType LT_EQ =
|
| + const TokenType.con2('LT_EQ', 38, TokenClass.RELATIONAL_OPERATOR, "<=");
|
|
|
| - static const TokenType LT_LT = const TokenType.con2('LT_LT', 39, TokenClass.SHIFT_OPERATOR, "<<");
|
| + static const TokenType LT_LT =
|
| + const TokenType.con2('LT_LT', 39, TokenClass.SHIFT_OPERATOR, "<<");
|
|
|
| - static const TokenType LT_LT_EQ = const TokenType.con2('LT_LT_EQ', 40, TokenClass.ASSIGNMENT_OPERATOR, "<<=");
|
| + static const TokenType LT_LT_EQ =
|
| + const TokenType.con2('LT_LT_EQ', 40, TokenClass.ASSIGNMENT_OPERATOR, "<<=");
|
|
|
| - static const TokenType MINUS = const TokenType.con2('MINUS', 41, TokenClass.ADDITIVE_OPERATOR, "-");
|
| + static const TokenType MINUS =
|
| + const TokenType.con2('MINUS', 41, TokenClass.ADDITIVE_OPERATOR, "-");
|
|
|
| - static const TokenType MINUS_EQ = const TokenType.con2('MINUS_EQ', 42, TokenClass.ASSIGNMENT_OPERATOR, "-=");
|
| + static const TokenType MINUS_EQ =
|
| + const TokenType.con2('MINUS_EQ', 42, TokenClass.ASSIGNMENT_OPERATOR, "-=");
|
|
|
| - static const TokenType MINUS_MINUS = const TokenType.con2('MINUS_MINUS', 43, TokenClass.UNARY_PREFIX_OPERATOR, "--");
|
| + static const TokenType MINUS_MINUS = const TokenType.con2(
|
| + 'MINUS_MINUS',
|
| + 43,
|
| + TokenClass.UNARY_PREFIX_OPERATOR,
|
| + "--");
|
|
|
| - static const TokenType OPEN_CURLY_BRACKET = const TokenType.con2('OPEN_CURLY_BRACKET', 44, TokenClass.NO_CLASS, "{");
|
| + static const TokenType OPEN_CURLY_BRACKET =
|
| + const TokenType.con2('OPEN_CURLY_BRACKET', 44, TokenClass.NO_CLASS, "{");
|
|
|
| - static const TokenType OPEN_PAREN = const TokenType.con2('OPEN_PAREN', 45, TokenClass.UNARY_POSTFIX_OPERATOR, "(");
|
| + static const TokenType OPEN_PAREN =
|
| + const TokenType.con2('OPEN_PAREN', 45, TokenClass.UNARY_POSTFIX_OPERATOR, "(");
|
|
|
| - static const TokenType OPEN_SQUARE_BRACKET = const TokenType.con2('OPEN_SQUARE_BRACKET', 46, TokenClass.UNARY_POSTFIX_OPERATOR, "[");
|
| + static const TokenType OPEN_SQUARE_BRACKET = const TokenType.con2(
|
| + 'OPEN_SQUARE_BRACKET',
|
| + 46,
|
| + TokenClass.UNARY_POSTFIX_OPERATOR,
|
| + "[");
|
|
|
| - static const TokenType PERCENT = const TokenType.con2('PERCENT', 47, TokenClass.MULTIPLICATIVE_OPERATOR, "%");
|
| + static const TokenType PERCENT =
|
| + const TokenType.con2('PERCENT', 47, TokenClass.MULTIPLICATIVE_OPERATOR, "%");
|
|
|
| - static const TokenType PERCENT_EQ = const TokenType.con2('PERCENT_EQ', 48, TokenClass.ASSIGNMENT_OPERATOR, "%=");
|
| + static const TokenType PERCENT_EQ =
|
| + const TokenType.con2('PERCENT_EQ', 48, TokenClass.ASSIGNMENT_OPERATOR, "%=");
|
|
|
| - static const TokenType PERIOD = const TokenType.con2('PERIOD', 49, TokenClass.UNARY_POSTFIX_OPERATOR, ".");
|
| + static const TokenType PERIOD =
|
| + const TokenType.con2('PERIOD', 49, TokenClass.UNARY_POSTFIX_OPERATOR, ".");
|
|
|
| - static const TokenType PERIOD_PERIOD = const TokenType.con2('PERIOD_PERIOD', 50, TokenClass.CASCADE_OPERATOR, "..");
|
| + static const TokenType PERIOD_PERIOD =
|
| + const TokenType.con2('PERIOD_PERIOD', 50, TokenClass.CASCADE_OPERATOR, "..");
|
|
|
| - static const TokenType PLUS = const TokenType.con2('PLUS', 51, TokenClass.ADDITIVE_OPERATOR, "+");
|
| + static const TokenType PLUS =
|
| + const TokenType.con2('PLUS', 51, TokenClass.ADDITIVE_OPERATOR, "+");
|
|
|
| - static const TokenType PLUS_EQ = const TokenType.con2('PLUS_EQ', 52, TokenClass.ASSIGNMENT_OPERATOR, "+=");
|
| + static const TokenType PLUS_EQ =
|
| + const TokenType.con2('PLUS_EQ', 52, TokenClass.ASSIGNMENT_OPERATOR, "+=");
|
|
|
| - static const TokenType PLUS_PLUS = const TokenType.con2('PLUS_PLUS', 53, TokenClass.UNARY_PREFIX_OPERATOR, "++");
|
| + static const TokenType PLUS_PLUS =
|
| + const TokenType.con2('PLUS_PLUS', 53, TokenClass.UNARY_PREFIX_OPERATOR, "++");
|
|
|
| - static const TokenType QUESTION = const TokenType.con2('QUESTION', 54, TokenClass.CONDITIONAL_OPERATOR, "?");
|
| + static const TokenType QUESTION =
|
| + const TokenType.con2('QUESTION', 54, TokenClass.CONDITIONAL_OPERATOR, "?");
|
|
|
| - static const TokenType SEMICOLON = const TokenType.con2('SEMICOLON', 55, TokenClass.NO_CLASS, ";");
|
| + static const TokenType SEMICOLON =
|
| + const TokenType.con2('SEMICOLON', 55, TokenClass.NO_CLASS, ";");
|
|
|
| - static const TokenType SLASH = const TokenType.con2('SLASH', 56, TokenClass.MULTIPLICATIVE_OPERATOR, "/");
|
| + static const TokenType SLASH =
|
| + const TokenType.con2('SLASH', 56, TokenClass.MULTIPLICATIVE_OPERATOR, "/");
|
|
|
| - static const TokenType SLASH_EQ = const TokenType.con2('SLASH_EQ', 57, TokenClass.ASSIGNMENT_OPERATOR, "/=");
|
| + static const TokenType SLASH_EQ =
|
| + const TokenType.con2('SLASH_EQ', 57, TokenClass.ASSIGNMENT_OPERATOR, "/=");
|
|
|
| - static const TokenType STAR = const TokenType.con2('STAR', 58, TokenClass.MULTIPLICATIVE_OPERATOR, "*");
|
| + static const TokenType STAR =
|
| + const TokenType.con2('STAR', 58, TokenClass.MULTIPLICATIVE_OPERATOR, "*");
|
|
|
| - static const TokenType STAR_EQ = const TokenType.con2('STAR_EQ', 59, TokenClass.ASSIGNMENT_OPERATOR, "*=");
|
| + static const TokenType STAR_EQ =
|
| + const TokenType.con2('STAR_EQ', 59, TokenClass.ASSIGNMENT_OPERATOR, "*=");
|
|
|
| - static const TokenType STRING_INTERPOLATION_EXPRESSION = const TokenType.con2('STRING_INTERPOLATION_EXPRESSION', 60, TokenClass.NO_CLASS, "\${");
|
| + static const TokenType STRING_INTERPOLATION_EXPRESSION = const TokenType.con2(
|
| + 'STRING_INTERPOLATION_EXPRESSION',
|
| + 60,
|
| + TokenClass.NO_CLASS,
|
| + "\${");
|
|
|
| - static const TokenType STRING_INTERPOLATION_IDENTIFIER = const TokenType.con2('STRING_INTERPOLATION_IDENTIFIER', 61, TokenClass.NO_CLASS, "\$");
|
| + static const TokenType STRING_INTERPOLATION_IDENTIFIER = const TokenType.con2(
|
| + 'STRING_INTERPOLATION_IDENTIFIER',
|
| + 61,
|
| + TokenClass.NO_CLASS,
|
| + "\$");
|
|
|
| - static const TokenType TILDE = const TokenType.con2('TILDE', 62, TokenClass.UNARY_PREFIX_OPERATOR, "~");
|
| + static const TokenType TILDE =
|
| + const TokenType.con2('TILDE', 62, TokenClass.UNARY_PREFIX_OPERATOR, "~");
|
|
|
| - static const TokenType TILDE_SLASH = const TokenType.con2('TILDE_SLASH', 63, TokenClass.MULTIPLICATIVE_OPERATOR, "~/");
|
| + static const TokenType TILDE_SLASH = const TokenType.con2(
|
| + 'TILDE_SLASH',
|
| + 63,
|
| + TokenClass.MULTIPLICATIVE_OPERATOR,
|
| + "~/");
|
|
|
| - static const TokenType TILDE_SLASH_EQ = const TokenType.con2('TILDE_SLASH_EQ', 64, TokenClass.ASSIGNMENT_OPERATOR, "~/=");
|
| + static const TokenType TILDE_SLASH_EQ = const TokenType.con2(
|
| + 'TILDE_SLASH_EQ',
|
| + 64,
|
| + TokenClass.ASSIGNMENT_OPERATOR,
|
| + "~/=");
|
|
|
| - static const TokenType BACKPING = const TokenType.con2('BACKPING', 65, TokenClass.NO_CLASS, "`");
|
| + static const TokenType BACKPING =
|
| + const TokenType.con2('BACKPING', 65, TokenClass.NO_CLASS, "`");
|
|
|
| - static const TokenType BACKSLASH = const TokenType.con2('BACKSLASH', 66, TokenClass.NO_CLASS, "\\");
|
| + static const TokenType BACKSLASH =
|
| + const TokenType.con2('BACKSLASH', 66, TokenClass.NO_CLASS, "\\");
|
|
|
| - static const TokenType PERIOD_PERIOD_PERIOD = const TokenType.con2('PERIOD_PERIOD_PERIOD', 67, TokenClass.NO_CLASS, "...");
|
| + static const TokenType PERIOD_PERIOD_PERIOD =
|
| + const TokenType.con2('PERIOD_PERIOD_PERIOD', 67, TokenClass.NO_CLASS, "...");
|
|
|
| static const List<TokenType> values = const [
|
| EOF,
|
| @@ -2574,16 +2795,11 @@ class TokenType extends Enum<TokenType> {
|
| */
|
| final String lexeme;
|
|
|
| - const TokenType.con1(String name, int ordinal) : this.con2(name, ordinal, TokenClass.NO_CLASS, null);
|
| -
|
| - const TokenType.con2(String name, int ordinal, this._tokenClass, this.lexeme) : super(name, ordinal);
|
| + const TokenType.con1(String name, int ordinal)
|
| + : this.con2(name, ordinal, TokenClass.NO_CLASS, null);
|
|
|
| - /**
|
| - * Return the precedence of the token, or `0` if the token does not represent an operator.
|
| - *
|
| - * @return the precedence of the token
|
| - */
|
| - int get precedence => _tokenClass.precedence;
|
| + const TokenType.con2(String name, int ordinal, this._tokenClass, this.lexeme)
|
| + : super(name, ordinal);
|
|
|
| /**
|
| * Return `true` if this type of token represents an additive operator.
|
| @@ -2597,7 +2813,8 @@ class TokenType extends Enum<TokenType> {
|
| *
|
| * @return `true` if this type of token represents an assignment operator
|
| */
|
| - bool get isAssignmentOperator => _tokenClass == TokenClass.ASSIGNMENT_OPERATOR;
|
| + bool get isAssignmentOperator =>
|
| + _tokenClass == TokenClass.ASSIGNMENT_OPERATOR;
|
|
|
| /**
|
| * Return `true` if this type of token represents an associative operator. An associative
|
| @@ -2611,7 +2828,14 @@ class TokenType extends Enum<TokenType> {
|
| *
|
| * @return `true` if this type of token represents an associative operator
|
| */
|
| - bool get isAssociativeOperator => this == AMPERSAND || this == AMPERSAND_AMPERSAND || this == BAR || this == BAR_BAR || this == CARET || this == PLUS || this == STAR;
|
| + bool get isAssociativeOperator =>
|
| + this == AMPERSAND ||
|
| + this == AMPERSAND_AMPERSAND ||
|
| + this == BAR ||
|
| + this == BAR_BAR ||
|
| + this == CARET ||
|
| + this == PLUS ||
|
| + this == STAR;
|
|
|
| /**
|
| * Return `true` if this type of token represents an equality operator.
|
| @@ -2625,28 +2849,35 @@ class TokenType extends Enum<TokenType> {
|
| *
|
| * @return `true` if this type of token represents an increment operator
|
| */
|
| - bool get isIncrementOperator => identical(lexeme, "++") || identical(lexeme, "--");
|
| + bool get isIncrementOperator =>
|
| + identical(lexeme, "++") || identical(lexeme, "--");
|
|
|
| /**
|
| * Return `true` if this type of token represents a multiplicative operator.
|
| *
|
| * @return `true` if this type of token represents a multiplicative operator
|
| */
|
| - bool get isMultiplicativeOperator => _tokenClass == TokenClass.MULTIPLICATIVE_OPERATOR;
|
| + bool get isMultiplicativeOperator =>
|
| + _tokenClass == TokenClass.MULTIPLICATIVE_OPERATOR;
|
|
|
| /**
|
| * Return `true` if this token type represents an operator.
|
| *
|
| * @return `true` if this token type represents an operator
|
| */
|
| - bool get isOperator => _tokenClass != TokenClass.NO_CLASS && this != OPEN_PAREN && this != OPEN_SQUARE_BRACKET && this != PERIOD;
|
| + bool get isOperator =>
|
| + _tokenClass != TokenClass.NO_CLASS &&
|
| + this != OPEN_PAREN &&
|
| + this != OPEN_SQUARE_BRACKET &&
|
| + this != PERIOD;
|
|
|
| /**
|
| * Return `true` if this type of token represents a relational operator.
|
| *
|
| * @return `true` if this type of token represents a relational operator
|
| */
|
| - bool get isRelationalOperator => _tokenClass == TokenClass.RELATIONAL_OPERATOR;
|
| + bool get isRelationalOperator =>
|
| + _tokenClass == TokenClass.RELATIONAL_OPERATOR;
|
|
|
| /**
|
| * Return `true` if this type of token represents a shift operator.
|
| @@ -2660,25 +2891,54 @@ class TokenType extends Enum<TokenType> {
|
| *
|
| * @return `true` if this type of token represents a unary postfix operator
|
| */
|
| - bool get isUnaryPostfixOperator => _tokenClass == TokenClass.UNARY_POSTFIX_OPERATOR;
|
| + bool get isUnaryPostfixOperator =>
|
| + _tokenClass == TokenClass.UNARY_POSTFIX_OPERATOR;
|
|
|
| /**
|
| * Return `true` if this type of token represents a unary prefix operator.
|
| *
|
| * @return `true` if this type of token represents a unary prefix operator
|
| */
|
| - bool get isUnaryPrefixOperator => _tokenClass == TokenClass.UNARY_PREFIX_OPERATOR;
|
| + bool get isUnaryPrefixOperator =>
|
| + _tokenClass == TokenClass.UNARY_PREFIX_OPERATOR;
|
|
|
| /**
|
| * Return `true` if this token type represents an operator that can be defined by users.
|
| *
|
| * @return `true` if this token type represents an operator that can be defined by users
|
| */
|
| - bool get isUserDefinableOperator => identical(lexeme, "==") || identical(lexeme, "~") || identical(lexeme, "[]") || identical(lexeme, "[]=") || identical(lexeme, "*") || identical(lexeme, "/") || identical(lexeme, "%") || identical(lexeme, "~/") || identical(lexeme, "+") || identical(lexeme, "-") || identical(lexeme, "<<") || identical(lexeme, ">>") || identical(lexeme, ">=") || identical(lexeme, ">") || identical(lexeme, "<=") || identical(lexeme, "<") || identical(lexeme, "&") || identical(lexeme, "^") || identical(lexeme, "|");
|
| + bool get isUserDefinableOperator =>
|
| + identical(lexeme, "==") ||
|
| + identical(lexeme, "~") ||
|
| + identical(lexeme, "[]") ||
|
| + identical(lexeme, "[]=") ||
|
| + identical(lexeme, "*") ||
|
| + identical(lexeme, "/") ||
|
| + identical(lexeme, "%") ||
|
| + identical(lexeme, "~/") ||
|
| + identical(lexeme, "+") ||
|
| + identical(lexeme, "-") ||
|
| + identical(lexeme, "<<") ||
|
| + identical(lexeme, ">>") ||
|
| + identical(lexeme, ">=") ||
|
| + identical(lexeme, ">") ||
|
| + identical(lexeme, "<=") ||
|
| + identical(lexeme, "<") ||
|
| + identical(lexeme, "&") ||
|
| + identical(lexeme, "^") ||
|
| + identical(lexeme, "|");
|
| +
|
| + /**
|
| + * Return the precedence of the token, or `0` if the token does not represent an operator.
|
| + *
|
| + * @return the precedence of the token
|
| + */
|
| + int get precedence => _tokenClass.precedence;
|
| }
|
|
|
| class TokenType_EOF extends TokenType {
|
| - const TokenType_EOF(String name, int ordinal, TokenClass arg0, String arg1) : super.con2(name, ordinal, arg0, arg1);
|
| + const TokenType_EOF(String name, int ordinal, TokenClass arg0, String arg1)
|
| + : super.con2(name, ordinal, arg0, arg1);
|
|
|
| @override
|
| String toString() => "-eof-";
|
| @@ -2702,11 +2962,12 @@ class TokenWithComment extends Token {
|
| * @param offset the offset from the beginning of the file to the first character in the token
|
| * @param precedingComment the first comment in the list of comments that precede this token
|
| */
|
| - TokenWithComment(TokenType type, int offset, this._precedingComment) : super(type, offset);
|
| + TokenWithComment(TokenType type, int offset, this._precedingComment)
|
| + : super(type, offset);
|
|
|
| @override
|
| - Token copy() => new TokenWithComment(type, offset, _precedingComment);
|
| + Token get precedingComments => _precedingComment;
|
|
|
| @override
|
| - Token get precedingComments => _precedingComment;
|
| -}
|
| + Token copy() => new TokenWithComment(type, offset, _precedingComment);
|
| +}
|
|
|