Index: pkg/front_end/lib/src/fasta/scanner/token.dart
diff --git a/pkg/front_end/lib/src/fasta/scanner/token.dart b/pkg/front_end/lib/src/fasta/scanner/token.dart
index f419056f5cead714a5de7a7174fcd821d231f8b6..6cb01c8433b5a4a0cb63ec2d55fc6c7d1fd5c4c0 100644
--- a/pkg/front_end/lib/src/fasta/scanner/token.dart
+++ b/pkg/front_end/lib/src/fasta/scanner/token.dart
@@ -5,145 +5,25 @@
 library fasta.scanner.token;
 import '../../scanner/token.dart' as analyzer;
-import '../../scanner/token.dart' show TokenType;
+import '../../scanner/token.dart' show Token, TokenType;
 import 'token_constants.dart' show IDENTIFIER_TOKEN;
 import 'string_canonicalizer.dart';
 /**
- * A token that doubles as a linked list.
- */
-abstract class Token implements analyzer.TokenWithComment {
-  @override
-  int charOffset;
-
-  Token(this.charOffset);
-
-  @override
-  analyzer.Token next;
-
-  @override
-  analyzer.Token previous;
-
-  @override
-  analyzer.CommentToken precedingComments;
-
-  @override
-  String get stringValue => type.stringValue;
-
-  @override
-  int get kind => type.kind;
-
-  /**
-   * Returns a textual representation of this token to be used for debugging
-   * purposes. The resulting string might contain information about the
-   * structure of the token, for example 'StringToken(foo)' for the identifier
-   * token 'foo'.
-   *
-   * Use [lexeme] for the text actually parsed by the token.
-   */
-  String toString();
-
-  @override
-  int get charCount => lexeme.length;
-
-  @override
-  int get charEnd => charOffset + charCount;
-
-  @override
-  bool get isEof => type == analyzer.TokenType.EOF;
-
-  bool get isBuiltInIdentifier => false;
-
-  @override
-  bool get isOperator => type.isOperator;
-
-  @override
-  bool get isUserDefinableOperator => type.isUserDefinableOperator;
-
-  @override
-  int get offset => charOffset;
-
-  @override
-  set offset(int newOffset) {
-    charOffset = newOffset;
-  }
-
-  @override
-  int get length => charCount;
-
-  @override
-  int get end => charEnd;
-
-  @override
-  analyzer.Token copy() {
-    return copyWithoutComments()
-      ..precedingComments = copyComments(precedingComments);
-  }
-
-  @override
-  analyzer.Token copyComments(analyzer.Token token) {
-    if (token == null) {
-      return null;
-    }
-    Token head = token.copy();
-    Token tail = head;
-    token = token.next;
-    while (token != null) {
-      tail = tail.setNext(token.copy());
-      token = token.next;
-    }
-    return head;
-  }
-
-  /// Return a copy of the receiver without [preceedingComments].
-  Token copyWithoutComments();
-
-  @override
-  bool get isSynthetic => false;
-
-  @override
-  analyzer.Keyword get keyword => null;
-
-  @override
-  bool matchesAny(List<analyzer.TokenType> types) {
-    for (analyzer.TokenType type in types) {
-      if (this.type == type) {
-        return true;
-      }
-    }
-    return false;
-  }
-
-  @override
-  analyzer.Token setNext(analyzer.Token token) {
-    next = token as Token;
-    next.previous = this;
-    return token;
-  }
-
-  @override
-  analyzer.Token setNextWithoutSettingPrevious(analyzer.Token token) {
-    next = token as Token;
-    return token;
-  }
-
-  @override
-  Object value() => lexeme;
-}
-
-/**
  * A [SymbolToken] represents the symbol in its precedence info.
  * Also used for end of file with EOF_INFO.
  */
-class SymbolToken extends Token {
-  final TokenType type;
-
-  SymbolToken(this.type, int charOffset) : super(charOffset);
-
-  factory SymbolToken.eof(int charOffset) {
-    var eof = new SyntheticSymbolToken(analyzer.TokenType.EOF, charOffset);
+class SymbolToken extends analyzer.TokenWithComment {
+  SymbolToken(TokenType type, int offset,
+      [analyzer.CommentToken precedingComments])
+      : super(type, offset, precedingComments);
+
+  factory SymbolToken.eof(int charOffset,
+      [analyzer.CommentToken precedingComments]) {
+    var eof =
+        new SyntheticSymbolToken(TokenType.EOF, charOffset, precedingComments);
     // EOF points to itself so there's always infinite look-ahead.
     eof.previous = eof;
     eof.next = eof;
@@ -154,15 +34,12 @@ class SymbolToken {
   String get lexeme => type.value;
   @override
-  bool get isIdentifier => false;
-
-  @override
   String toString() => "SymbolToken(${isEof ? '-eof-' : lexeme})";
   @override
-  Token copyWithoutComments() => isEof
-      ? new SymbolToken.eof(charOffset)
-      : new SymbolToken(type, charOffset);
+  Token copy() => isEof
+      ? new SymbolToken.eof(charOffset, precedingComments)
+      : new SymbolToken(type, charOffset, precedingComments);
 }
 /**
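
With this rework, SymbolToken is constructed directly from the shared analyzer TokenType plus an optional precedingComments argument, and the eof factory still points previous and next back at the token itself so look-ahead never walks off the end of the stream. A minimal sketch, not part of the patch; the package imports are assumed from the file layout in the diff header and the offsets are arbitrary:

    // Sketch only: exercises the constructors introduced by this patch.
    import 'package:front_end/src/fasta/scanner/token.dart';
    import 'package:front_end/src/scanner/token.dart' show TokenType;

    void main() {
      var open = new SymbolToken(TokenType.OPEN_PAREN, 0);
      var eof = new SymbolToken.eof(1);
      open.setNext(eof);
      // EOF is self-linked, so repeated look-ahead simply stays on EOF.
      assert(identical(eof.next, eof));
      assert(open.next.next.isEof);
    }
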
@@ -172,19 +49,17 @@ class SymbolToken {
  * then it will insert an synthetic ')'.
  */
 class SyntheticSymbolToken extends SymbolToken {
-  SyntheticSymbolToken(TokenType type, int charOffset)
-      : super(type, charOffset);
-
-  @override
-  int get charCount => 0;
+  SyntheticSymbolToken(TokenType type, int charOffset,
+      [analyzer.CommentToken precedingComments])
+      : super(type, charOffset, precedingComments);
   @override
-  bool get isSynthetic => true;
+  int get length => 0;
   @override
-  Token copyWithoutComments() => isEof
-      ? new SymbolToken.eof(charOffset)
-      : new SyntheticSymbolToken(type, charOffset);
+  Token copy() => isEof
+      ? new SymbolToken.eof(charOffset, precedingComments)
+      : new SyntheticSymbolToken(type, charOffset, precedingComments);
 }
 /**
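
SyntheticSymbolToken drops its separate charCount and isSynthetic overrides in favour of a single length => 0, so a synthetic token sits at a real offset but claims no source characters. As a rough sketch of the recovery described in the comment above, a hypothetical helper (insertSyntheticCloseParen is illustrative only, not part of the SDK) could splice a synthetic ')' into the token stream:

    // Sketch only: splice a zero-length ')' after [before].
    import 'package:front_end/src/fasta/scanner/token.dart';
    import 'package:front_end/src/scanner/token.dart' show Token, TokenType;

    Token insertSyntheticCloseParen(Token before) {
      Token oldNext = before.next;
      Token synthetic =
          new SyntheticSymbolToken(TokenType.CLOSE_PAREN, before.end);
      before.setNext(synthetic);
      synthetic.setNext(oldNext);
      assert(synthetic.length == 0);
      return synthetic;
    }
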
@@ -197,7 +72,9 @@ class BeginGroupToken extends SymbolToken
     implements analyzer.BeginTokenWithComment {
   Token endGroup;
-  BeginGroupToken(TokenType type, int charOffset) : super(type, charOffset);
+  BeginGroupToken(TokenType type, int charOffset,
+      [analyzer.CommentToken precedingComments])
+      : super(type, charOffset, precedingComments);
   @override
   analyzer.Token get endToken => endGroup;
@@ -208,59 +85,7 @@ class BeginGroupToken extends SymbolToken
   }
   @override
-  Token copyWithoutComments() => new BeginGroupToken(type, charOffset);
-}
-
-/**
- * A keyword token.
- */
-class KeywordToken extends Token implements analyzer.KeywordTokenWithComment {
-  final analyzer.Keyword keyword;
-
-  KeywordToken(this.keyword, int charOffset) : super(charOffset);
-
-  @override
-  String get lexeme => keyword.lexeme;
-
-  @override
-  bool get isIdentifier => keyword.isPseudo || keyword.isBuiltIn;
-
-  @override
-  bool get isBuiltInIdentifier => keyword.isBuiltIn;
-
-  @override
-  String toString() => "KeywordToken($lexeme)";
-
-  @override
-  Token copyWithoutComments() => new KeywordToken(keyword, charOffset);
-
-  @override
-  analyzer.Keyword value() => keyword;
-
-  @override
-  analyzer.TokenType get type => keyword;
-}
-
-/**
- * A synthetic keyword token.
- */
-class SyntheticKeywordToken extends KeywordToken
-    implements analyzer.SyntheticKeywordToken {
-  /**
-   * Initialize a newly created token to represent the given [keyword] at the
-   * given [offset].
-   */
-  SyntheticKeywordToken(analyzer.Keyword keyword, int offset)
-      : super(keyword, offset);
-
-  @override
-  bool get isSynthetic => true;
-
-  @override
-  int get length => 0;
-
-  @override
-  Token copyWithoutComments() => new SyntheticKeywordToken(keyword, offset);
+  Token copy() => new BeginGroupToken(type, charOffset, precedingComments);
 }
 /**
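
BeginGroupToken keeps its endGroup field, so an opening bracket can still be linked to its closing counterpart, while the fasta KeywordToken and SyntheticKeywordToken classes are deleted outright with no replacement in this file. A small sketch, not part of the patch, with arbitrary offsets:

    // Sketch only: pair '(' at offset 10 with ')' at offset 14.
    import 'package:front_end/src/fasta/scanner/token.dart';
    import 'package:front_end/src/scanner/token.dart' show Token, TokenType;

    void main() {
      BeginGroupToken begin = new BeginGroupToken(TokenType.OPEN_PAREN, 10);
      Token end = new SymbolToken(TokenType.CLOSE_PAREN, 14);
      begin.endGroup = end;
      assert(identical(begin.endToken, end));
    }
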
@@ -268,7 +93,8 @@ class SyntheticKeywordToken extends KeywordToken
  * number literals, comments, and error tokens, using the corresponding
  * precedence info.
  */
-class StringToken extends Token implements analyzer.StringTokenWithComment {
+class StringToken extends analyzer.TokenWithComment
+    implements analyzer.StringTokenWithComment {
   /**
    * The length threshold above which substring tokens are computed lazily.
    *
@@ -281,27 +107,24 @@ class StringToken extends Token implements analyzer.StringTokenWithComment {
   var /* String | LazySubtring */ valueOrLazySubstring;
-  @override
-  final TokenType type;
-
   /**
    * Creates a non-lazy string token. If [canonicalize] is true, the string
    * is canonicalized before the token is created.
    */
-  StringToken.fromString(this.type, String value, int charOffset,
-      {bool canonicalize: false})
+  StringToken.fromString(TokenType type, String value, int charOffset,
+      {bool canonicalize: false, analyzer.CommentToken precedingComments})
       : valueOrLazySubstring =
             canonicalizedString(value, 0, value.length, canonicalize),
-        super(charOffset);
+        super(type, charOffset, precedingComments);
   /**
    * Creates a lazy string token. If [canonicalize] is true, the string
    * is canonicalized before the token is created.
    */
   StringToken.fromSubstring(
-      this.type, String data, int start, int end, int charOffset,
-      {bool canonicalize: false})
-      : super(charOffset) {
+      TokenType type, String data, int start, int end, int charOffset,
+      {bool canonicalize: false, analyzer.CommentToken precedingComments})
+      : super(type, charOffset, precedingComments) {
     int length = end - start;
     if (length <= LAZY_THRESHOLD) {
       valueOrLazySubstring =
@@ -316,9 +139,10 @@ class StringToken extends Token implements analyzer.StringTokenWithComment {
    * Creates a lazy string token. If [asciiOnly] is false, the byte array
    * is passed through a UTF-8 decoder.
    */
-  StringToken.fromUtf8Bytes(this.type, List<int> data, int start, int end,
-      bool asciiOnly, int charOffset)
-      : super(charOffset) {
+  StringToken.fromUtf8Bytes(TokenType type, List<int> data, int start, int end,
+      bool asciiOnly, int charOffset,
+      {analyzer.CommentToken precedingComments})
+      : super(type, charOffset, precedingComments) {
     int length = end - start;
     if (length <= LAZY_THRESHOLD) {
       valueOrLazySubstring = decodeUtf8(data, start, end, asciiOnly);
@@ -327,8 +151,9 @@ class StringToken extends Token implements analyzer.StringTokenWithComment {
     }
   }
-  StringToken._(this.type, this.valueOrLazySubstring, int charOffset)
-      : super(charOffset);
+  StringToken._(TokenType type, this.valueOrLazySubstring, int charOffset,
+      [analyzer.CommentToken precedingComments])
+      : super(type, charOffset, precedingComments);
   @override
   String get lexeme {
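
The StringToken constructors keep their lazy-substring behaviour: a substring no longer than LAZY_THRESHOLD is materialized (and optionally canonicalized) immediately, while a longer one is stored in lazy form to be decoded on demand when lexeme is read. The only signature change is the explicit TokenType parameter and the optional precedingComments. A short sketch, not part of the patch:

    // Sketch only: build string tokens over a small source fragment.
    import 'package:front_end/src/fasta/scanner/token.dart';
    import 'package:front_end/src/scanner/token.dart' show TokenType;

    void main() {
      String source = 'var x = "hello";';
      var identifier = new StringToken.fromSubstring(
          TokenType.IDENTIFIER, source, 4, 5, 4,
          canonicalize: true);
      var literal = new StringToken.fromString(TokenType.STRING, '"hello"', 8);
      assert(identifier.lexeme == 'x');
      assert(literal.lexeme == '"hello"');
    }
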
@@ -369,8 +194,8 @@ class StringToken extends Token implements analyzer.StringTokenWithComment {
   }
   @override
-  Token copyWithoutComments() =>
-      new StringToken._(type, valueOrLazySubstring, charOffset);
+  Token copy() => new StringToken._(
+      type, valueOrLazySubstring, charOffset, precedingComments);
   @override
   String value() => lexeme;
@@ -381,18 +206,16 @@ class StringToken extends Token implements analyzer.StringTokenWithComment {
  */
 class SyntheticStringToken extends StringToken
     implements analyzer.SyntheticStringToken {
-  SyntheticStringToken(TokenType type, String value, int offset)
-      : super._(type, value, offset);
-
-  @override
-  bool get isSynthetic => true;
+  SyntheticStringToken(TokenType type, String value, int offset,
+      [analyzer.CommentToken precedingComments])
+      : super._(type, value, offset, precedingComments);
   @override
   int get length => 0;
   @override
-  Token copyWithoutComments() =>
-      new SyntheticStringToken(type, valueOrLazySubstring, offset);
+  Token copy() => new SyntheticStringToken(
+      type, valueOrLazySubstring, offset, precedingComments);
 }
 class CommentToken extends StringToken implements analyzer.CommentToken {