OLD | NEW |
1 // Copyright (c) 2014, the Dart project authors. Please see the AUTHORS file | 1 // Copyright (c) 2014, the Dart project authors. Please see the AUTHORS file |
2 // for details. All rights reserved. Use of this source code is governed by a | 2 // for details. All rights reserved. Use of this source code is governed by a |
3 // BSD-style license that can be found in the LICENSE file. | 3 // BSD-style license that can be found in the LICENSE file. |
4 | 4 |
5 library engine.scanner; | 5 library engine.scanner; |
6 | 6 |
7 import 'dart:collection'; | 7 import 'dart:collection'; |
8 | 8 |
9 import 'error.dart'; | 9 import 'error.dart'; |
10 import 'java_engine.dart'; | 10 import 'java_engine.dart'; |
(...skipping 357 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... |
368 | 368 |
369 static const Keyword PART = const Keyword('PART', "part", true); | 369 static const Keyword PART = const Keyword('PART', "part", true); |
370 | 370 |
371 static const Keyword SET = const Keyword('SET', "set", true); | 371 static const Keyword SET = const Keyword('SET', "set", true); |
372 | 372 |
373 static const Keyword STATIC = const Keyword('STATIC', "static", true); | 373 static const Keyword STATIC = const Keyword('STATIC', "static", true); |
374 | 374 |
375 static const Keyword TYPEDEF = const Keyword('TYPEDEF', "typedef", true); | 375 static const Keyword TYPEDEF = const Keyword('TYPEDEF', "typedef", true); |
376 | 376 |
377 static const List<Keyword> values = const [ | 377 static const List<Keyword> values = const [ |
378 ASSERT, | 378 ASSERT, |
379 BREAK, | 379 BREAK, |
380 CASE, | 380 CASE, |
381 CATCH, | 381 CATCH, |
382 CLASS, | 382 CLASS, |
383 CONST, | 383 CONST, |
384 CONTINUE, | 384 CONTINUE, |
385 DEFAULT, | 385 DEFAULT, |
386 DO, | 386 DO, |
387 ELSE, | 387 ELSE, |
388 ENUM, | 388 ENUM, |
389 EXTENDS, | 389 EXTENDS, |
390 FALSE, | 390 FALSE, |
391 FINAL, | 391 FINAL, |
392 FINALLY, | 392 FINALLY, |
393 FOR, | 393 FOR, |
394 IF, | 394 IF, |
395 IN, | 395 IN, |
396 IS, | 396 IS, |
397 NEW, | 397 NEW, |
398 NULL, | 398 NULL, |
399 RETHROW, | 399 RETHROW, |
400 RETURN, | 400 RETURN, |
401 SUPER, | 401 SUPER, |
402 SWITCH, | 402 SWITCH, |
403 THIS, | 403 THIS, |
404 THROW, | 404 THROW, |
405 TRUE, | 405 TRUE, |
406 TRY, | 406 TRY, |
407 VAR, | 407 VAR, |
408 VOID, | 408 VOID, |
409 WHILE, | 409 WHILE, |
410 WITH, | 410 WITH, |
411 ABSTRACT, | 411 ABSTRACT, |
412 AS, | 412 AS, |
413 DEFERRED, | 413 DEFERRED, |
414 DYNAMIC, | 414 DYNAMIC, |
415 EXPORT, | 415 EXPORT, |
416 EXTERNAL, | 416 EXTERNAL, |
417 FACTORY, | 417 FACTORY, |
418 GET, | 418 GET, |
419 IMPLEMENTS, | 419 IMPLEMENTS, |
420 IMPORT, | 420 IMPORT, |
421 LIBRARY, | 421 LIBRARY, |
422 OPERATOR, | 422 OPERATOR, |
423 PART, | 423 PART, |
424 SET, | 424 SET, |
425 STATIC, | 425 STATIC, |
426 TYPEDEF]; | 426 TYPEDEF |
| 427 ]; |
427 | 428 |
428 /** | 429 /** |
429 * A table mapping the lexemes of keywords to the corresponding keyword. | 430 * A table mapping the lexemes of keywords to the corresponding keyword. |
430 */ | 431 */ |
431 static final Map<String, Keyword> keywords = _createKeywordMap(); | 432 static final Map<String, Keyword> keywords = _createKeywordMap(); |
432 | 433 |
433 /** | 434 /** |
434 * The name of the keyword type. | 435 * The name of the keyword type. |
435 */ | 436 */ |
436 final String name; | 437 final String name; |
(...skipping 89 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... |
526 * [start] the index of the character in the strings used to transition to a | 527 * [start] the index of the character in the strings used to transition to a |
527 * new state | 528 * new state |
528 * [strings] an array containing all of the strings that will be recognized by | 529 * [strings] an array containing all of the strings that will be recognized by |
529 * the state machine | 530 * the state machine |
530 * [offset] the offset of the first string in the array that has the prefix | 531 * [offset] the offset of the first string in the array that has the prefix |
531 * that is assumed to have been recognized by the time we reach the state | 532 * that is assumed to have been recognized by the time we reach the state |
532 * being built | 533 * being built |
533 * [length] the number of strings in the array that pass through the state | 534 * [length] the number of strings in the array that pass through the state |
534 * being built | 535 * being built |
535 */ | 536 */ |
536 static KeywordState _computeKeywordStateTable(int start, List<String> strings, | 537 static KeywordState _computeKeywordStateTable( |
537 int offset, int length) { | 538 int start, List<String> strings, int offset, int length) { |
538 List<KeywordState> result = new List<KeywordState>(26); | 539 List<KeywordState> result = new List<KeywordState>(26); |
539 assert(length != 0); | 540 assert(length != 0); |
540 int chunk = 0x0; | 541 int chunk = 0x0; |
541 int chunkStart = -1; | 542 int chunkStart = -1; |
542 bool isLeaf = false; | 543 bool isLeaf = false; |
543 for (int i = offset; i < offset + length; i++) { | 544 for (int i = offset; i < offset + length; i++) { |
544 if (strings[i].length == start) { | 545 if (strings[i].length == start) { |
545 isLeaf = true; | 546 isLeaf = true; |
546 } | 547 } |
547 if (strings[i].length > start) { | 548 if (strings[i].length > start) { |
548 int c = strings[i].codeUnitAt(start); | 549 int c = strings[i].codeUnitAt(start); |
549 if (chunk != c) { | 550 if (chunk != c) { |
550 if (chunkStart != -1) { | 551 if (chunkStart != -1) { |
551 result[chunk - 0x61] = | 552 result[chunk - 0x61] = _computeKeywordStateTable( |
552 _computeKeywordStateTable(start + 1, strings, chunkStart, i - ch
unkStart); | 553 start + 1, strings, chunkStart, i - chunkStart); |
553 } | 554 } |
554 chunkStart = i; | 555 chunkStart = i; |
555 chunk = c; | 556 chunk = c; |
556 } | 557 } |
557 } | 558 } |
558 } | 559 } |
559 if (chunkStart != -1) { | 560 if (chunkStart != -1) { |
560 assert(result[chunk - 0x61] == null); | 561 assert(result[chunk - 0x61] == null); |
561 result[chunk - | 562 result[chunk - 0x61] = _computeKeywordStateTable( |
562 0x61] = _computeKeywordStateTable( | 563 start + 1, strings, chunkStart, offset + length - chunkStart); |
563 start + 1, | |
564 strings, | |
565 chunkStart, | |
566 offset + length - chunkStart); | |
567 } else { | 564 } else { |
568 assert(length == 1); | 565 assert(length == 1); |
569 return new KeywordState(_EMPTY_TABLE, strings[offset]); | 566 return new KeywordState(_EMPTY_TABLE, strings[offset]); |
570 } | 567 } |
571 if (isLeaf) { | 568 if (isLeaf) { |
572 return new KeywordState(result, strings[offset]); | 569 return new KeywordState(result, strings[offset]); |
573 } else { | 570 } else { |
574 return new KeywordState(result, null); | 571 return new KeywordState(result, null); |
575 } | 572 } |
576 } | 573 } |
(...skipping 67 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... |
644 void applyDelta(int delta) { | 641 void applyDelta(int delta) { |
645 super.applyDelta(delta); | 642 super.applyDelta(delta); |
646 Token token = precedingComments; | 643 Token token = precedingComments; |
647 while (token != null) { | 644 while (token != null) { |
648 token.applyDelta(delta); | 645 token.applyDelta(delta); |
649 token = token.next; | 646 token = token.next; |
650 } | 647 } |
651 } | 648 } |
652 | 649 |
653 @override | 650 @override |
654 Token copy() => | 651 Token copy() => new KeywordTokenWithComment( |
655 new KeywordTokenWithComment(keyword, offset, copyComments(precedingComment
s)); | 652 keyword, offset, copyComments(precedingComments)); |
656 } | 653 } |
657 | 654 |
658 /** | 655 /** |
659 * The class `Scanner` implements a scanner for Dart code. | 656 * The class `Scanner` implements a scanner for Dart code. |
660 * | 657 * |
661 * The lexical structure of Dart is ambiguous without knowledge of the context | 658 * The lexical structure of Dart is ambiguous without knowledge of the context |
662 * in which a token is being scanned. For example, without context we cannot | 659 * in which a token is being scanned. For example, without context we cannot |
663 * determine whether source of the form "<<" should be scanned as a single | 660 * determine whether source of the form "<<" should be scanned as a single |
664 * left-shift operator or as two left angle brackets. This scanner does not have | 661 * left-shift operator or as two left angle brackets. This scanner does not have |
665 * any context, so it always resolves such conflicts by scanning the longest | 662 * any context, so it always resolves such conflicts by scanning the longest |
(...skipping 218 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... |
884 if (next == 0x3B) { | 881 if (next == 0x3B) { |
885 _appendTokenOfType(TokenType.SEMICOLON); | 882 _appendTokenOfType(TokenType.SEMICOLON); |
886 return _reader.advance(); | 883 return _reader.advance(); |
887 } | 884 } |
888 if (next == 0x3F) { | 885 if (next == 0x3F) { |
889 _appendTokenOfType(TokenType.QUESTION); | 886 _appendTokenOfType(TokenType.QUESTION); |
890 return _reader.advance(); | 887 return _reader.advance(); |
891 } | 888 } |
892 if (next == 0x5D) { | 889 if (next == 0x5D) { |
893 _appendEndToken( | 890 _appendEndToken( |
894 TokenType.CLOSE_SQUARE_BRACKET, | 891 TokenType.CLOSE_SQUARE_BRACKET, TokenType.OPEN_SQUARE_BRACKET); |
895 TokenType.OPEN_SQUARE_BRACKET); | |
896 return _reader.advance(); | 892 return _reader.advance(); |
897 } | 893 } |
898 if (next == 0x60) { | 894 if (next == 0x60) { |
899 _appendTokenOfType(TokenType.BACKPING); | 895 _appendTokenOfType(TokenType.BACKPING); |
900 return _reader.advance(); | 896 return _reader.advance(); |
901 } | 897 } |
902 if (next == 0x7B) { | 898 if (next == 0x7B) { |
903 _appendBeginToken(TokenType.OPEN_CURLY_BRACKET); | 899 _appendBeginToken(TokenType.OPEN_CURLY_BRACKET); |
904 return _reader.advance(); | 900 return _reader.advance(); |
905 } | 901 } |
906 if (next == 0x7D) { | 902 if (next == 0x7D) { |
907 _appendEndToken( | 903 _appendEndToken( |
908 TokenType.CLOSE_CURLY_BRACKET, | 904 TokenType.CLOSE_CURLY_BRACKET, TokenType.OPEN_CURLY_BRACKET); |
909 TokenType.OPEN_CURLY_BRACKET); | |
910 return _reader.advance(); | 905 return _reader.advance(); |
911 } | 906 } |
912 if (next == 0x2F) { | 907 if (next == 0x2F) { |
913 return _tokenizeSlashOrComment(next); | 908 return _tokenizeSlashOrComment(next); |
914 } | 909 } |
915 if (next == 0x40) { | 910 if (next == 0x40) { |
916 _appendTokenOfType(TokenType.AT); | 911 _appendTokenOfType(TokenType.AT); |
917 return _reader.advance(); | 912 return _reader.advance(); |
918 } | 913 } |
919 if (next == 0x22 || next == 0x27) { | 914 if (next == 0x22 || next == 0x27) { |
(...skipping 111 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... |
1031 _groupingStack.removeAt(_stackEnd--); | 1026 _groupingStack.removeAt(_stackEnd--); |
1032 } | 1027 } |
1033 } | 1028 } |
1034 } | 1029 } |
1035 | 1030 |
1036 void _appendEofToken() { | 1031 void _appendEofToken() { |
1037 Token eofToken; | 1032 Token eofToken; |
1038 if (_firstComment == null) { | 1033 if (_firstComment == null) { |
1039 eofToken = new Token(TokenType.EOF, _reader.offset + 1); | 1034 eofToken = new Token(TokenType.EOF, _reader.offset + 1); |
1040 } else { | 1035 } else { |
1041 eofToken = | 1036 eofToken = new TokenWithComment( |
1042 new TokenWithComment(TokenType.EOF, _reader.offset + 1, _firstComment)
; | 1037 TokenType.EOF, _reader.offset + 1, _firstComment); |
1043 _firstComment = null; | 1038 _firstComment = null; |
1044 _lastComment = null; | 1039 _lastComment = null; |
1045 } | 1040 } |
1046 // The EOF token points to itself so that there is always infinite | 1041 // The EOF token points to itself so that there is always infinite |
1047 // look-ahead. | 1042 // look-ahead. |
1048 eofToken.setNext(eofToken); | 1043 eofToken.setNext(eofToken); |
1049 _tail = _tail.setNext(eofToken); | 1044 _tail = _tail.setNext(eofToken); |
1050 if (_stackEnd >= 0) { | 1045 if (_stackEnd >= 0) { |
1051 _hasUnmatchedGroups = true; | 1046 _hasUnmatchedGroups = true; |
1052 // TODO(brianwilkerson) Fix the ungrouped tokens? | 1047 // TODO(brianwilkerson) Fix the ungrouped tokens? |
(...skipping 19 matching lines...) Expand all Loading... |
1072 new StringTokenWithComment(type, value, _tokenStart, _firstComment)); | 1067 new StringTokenWithComment(type, value, _tokenStart, _firstComment)); |
1073 _firstComment = null; | 1068 _firstComment = null; |
1074 _lastComment = null; | 1069 _lastComment = null; |
1075 } | 1070 } |
1076 } | 1071 } |
1077 | 1072 |
1078 void _appendStringTokenWithOffset(TokenType type, String value, int offset) { | 1073 void _appendStringTokenWithOffset(TokenType type, String value, int offset) { |
1079 if (_firstComment == null) { | 1074 if (_firstComment == null) { |
1080 _tail = _tail.setNext(new StringToken(type, value, _tokenStart + offset)); | 1075 _tail = _tail.setNext(new StringToken(type, value, _tokenStart + offset)); |
1081 } else { | 1076 } else { |
1082 _tail = _tail.setNext( | 1077 _tail = _tail.setNext(new StringTokenWithComment( |
1083 new StringTokenWithComment(type, value, _tokenStart + offset, _firstCo
mment)); | 1078 type, value, _tokenStart + offset, _firstComment)); |
1084 _firstComment = null; | 1079 _firstComment = null; |
1085 _lastComment = null; | 1080 _lastComment = null; |
1086 } | 1081 } |
1087 } | 1082 } |
1088 | 1083 |
1089 void _appendTokenOfType(TokenType type) { | 1084 void _appendTokenOfType(TokenType type) { |
1090 if (_firstComment == null) { | 1085 if (_firstComment == null) { |
1091 _tail = _tail.setNext(new Token(type, _tokenStart)); | 1086 _tail = _tail.setNext(new Token(type, _tokenStart)); |
1092 } else { | 1087 } else { |
1093 _tail = | 1088 _tail = |
(...skipping 40 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... |
1134 return null; | 1129 return null; |
1135 } | 1130 } |
1136 | 1131 |
1137 /** | 1132 /** |
1138 * Report an error at the current offset. | 1133 * Report an error at the current offset. |
1139 * | 1134 * |
1140 * [errorCode] the error code indicating the nature of the error | 1135 * [errorCode] the error code indicating the nature of the error |
1141 * [arguments] any arguments needed to complete the error message | 1136 * [arguments] any arguments needed to complete the error message |
1142 */ | 1137 */ |
1143 void _reportError(ScannerErrorCode errorCode, [List<Object> arguments]) { | 1138 void _reportError(ScannerErrorCode errorCode, [List<Object> arguments]) { |
1144 _errorListener.onError( | 1139 _errorListener.onError(new AnalysisError.con2( |
1145 new AnalysisError.con2(source, _reader.offset, 1, errorCode, arguments))
; | 1140 source, _reader.offset, 1, errorCode, arguments)); |
1146 } | 1141 } |
1147 | 1142 |
1148 int _select(int choice, TokenType yesType, TokenType noType) { | 1143 int _select(int choice, TokenType yesType, TokenType noType) { |
1149 int next = _reader.advance(); | 1144 int next = _reader.advance(); |
1150 if (next == choice) { | 1145 if (next == choice) { |
1151 _appendTokenOfType(yesType); | 1146 _appendTokenOfType(yesType); |
1152 return _reader.advance(); | 1147 return _reader.advance(); |
1153 } else { | 1148 } else { |
1154 _appendTokenOfType(noType); | 1149 _appendTokenOfType(noType); |
1155 return next; | 1150 return next; |
1156 } | 1151 } |
1157 } | 1152 } |
1158 | 1153 |
1159 int _selectWithOffset(int choice, TokenType yesType, TokenType noType, | 1154 int _selectWithOffset( |
1160 int offset) { | 1155 int choice, TokenType yesType, TokenType noType, int offset) { |
1161 int next = _reader.advance(); | 1156 int next = _reader.advance(); |
1162 if (next == choice) { | 1157 if (next == choice) { |
1163 _appendTokenOfTypeWithOffset(yesType, offset); | 1158 _appendTokenOfTypeWithOffset(yesType, offset); |
1164 return _reader.advance(); | 1159 return _reader.advance(); |
1165 } else { | 1160 } else { |
1166 _appendTokenOfTypeWithOffset(noType, offset); | 1161 _appendTokenOfTypeWithOffset(noType, offset); |
1167 return next; | 1162 return next; |
1168 } | 1163 } |
1169 } | 1164 } |
1170 | 1165 |
(...skipping 30 matching lines...) Expand all Loading... |
1201 int _tokenizeCaret(int next) => | 1196 int _tokenizeCaret(int next) => |
1202 _select(0x3D, TokenType.CARET_EQ, TokenType.CARET); | 1197 _select(0x3D, TokenType.CARET_EQ, TokenType.CARET); |
1203 | 1198 |
1204 int _tokenizeDotOrNumber(int next) { | 1199 int _tokenizeDotOrNumber(int next) { |
1205 int start = _reader.offset; | 1200 int start = _reader.offset; |
1206 next = _reader.advance(); | 1201 next = _reader.advance(); |
1207 if (0x30 <= next && next <= 0x39) { | 1202 if (0x30 <= next && next <= 0x39) { |
1208 return _tokenizeFractionPart(next, start); | 1203 return _tokenizeFractionPart(next, start); |
1209 } else if (0x2E == next) { | 1204 } else if (0x2E == next) { |
1210 return _select( | 1205 return _select( |
1211 0x2E, | 1206 0x2E, TokenType.PERIOD_PERIOD_PERIOD, TokenType.PERIOD_PERIOD); |
1212 TokenType.PERIOD_PERIOD_PERIOD, | |
1213 TokenType.PERIOD_PERIOD); | |
1214 } else { | 1207 } else { |
1215 _appendTokenOfType(TokenType.PERIOD); | 1208 _appendTokenOfType(TokenType.PERIOD); |
1216 return next; | 1209 return next; |
1217 } | 1210 } |
1218 } | 1211 } |
1219 | 1212 |
1220 int _tokenizeEquals(int next) { | 1213 int _tokenizeEquals(int next) { |
1221 // = == => | 1214 // = == => |
1222 next = _reader.advance(); | 1215 next = _reader.advance(); |
1223 if (next == 0x3D) { | 1216 if (next == 0x3D) { |
(...skipping 49 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... |
1273 continue LOOP; | 1266 continue LOOP; |
1274 } else { | 1267 } else { |
1275 done = true; | 1268 done = true; |
1276 continue LOOP; | 1269 continue LOOP; |
1277 } | 1270 } |
1278 next = _reader.advance(); | 1271 next = _reader.advance(); |
1279 } | 1272 } |
1280 if (!hasDigit) { | 1273 if (!hasDigit) { |
1281 _appendStringToken(TokenType.INT, _reader.getString(start, -2)); | 1274 _appendStringToken(TokenType.INT, _reader.getString(start, -2)); |
1282 if (0x2E == next) { | 1275 if (0x2E == next) { |
1283 return _selectWithOffset( | 1276 return _selectWithOffset(0x2E, TokenType.PERIOD_PERIOD_PERIOD, |
1284 0x2E, | 1277 TokenType.PERIOD_PERIOD, _reader.offset - 1); |
1285 TokenType.PERIOD_PERIOD_PERIOD, | |
1286 TokenType.PERIOD_PERIOD, | |
1287 _reader.offset - 1); | |
1288 } | 1278 } |
1289 _appendTokenOfTypeWithOffset(TokenType.PERIOD, _reader.offset - 1); | 1279 _appendTokenOfTypeWithOffset(TokenType.PERIOD, _reader.offset - 1); |
1290 return bigSwitch(next); | 1280 return bigSwitch(next); |
1291 } | 1281 } |
1292 _appendStringToken( | 1282 _appendStringToken( |
1293 TokenType.DOUBLE, | 1283 TokenType.DOUBLE, _reader.getString(start, next < 0 ? 0 : -1)); |
1294 _reader.getString(start, next < 0 ? 0 : -1)); | |
1295 return next; | 1284 return next; |
1296 } | 1285 } |
1297 | 1286 |
1298 int _tokenizeGreaterThan(int next) { | 1287 int _tokenizeGreaterThan(int next) { |
1299 // > >= >> >>= | 1288 // > >= >> >>= |
1300 next = _reader.advance(); | 1289 next = _reader.advance(); |
1301 if (0x3D == next) { | 1290 if (0x3D == next) { |
1302 _appendTokenOfType(TokenType.GT_EQ); | 1291 _appendTokenOfType(TokenType.GT_EQ); |
1303 return _reader.advance(); | 1292 return _reader.advance(); |
1304 } else if (0x3E == next) { | 1293 } else if (0x3E == next) { |
(...skipping 18 matching lines...) Expand all Loading... |
1323 next = _reader.advance(); | 1312 next = _reader.advance(); |
1324 if ((0x30 <= next && next <= 0x39) || | 1313 if ((0x30 <= next && next <= 0x39) || |
1325 (0x41 <= next && next <= 0x46) || | 1314 (0x41 <= next && next <= 0x46) || |
1326 (0x61 <= next && next <= 0x66)) { | 1315 (0x61 <= next && next <= 0x66)) { |
1327 hasDigits = true; | 1316 hasDigits = true; |
1328 } else { | 1317 } else { |
1329 if (!hasDigits) { | 1318 if (!hasDigits) { |
1330 _reportError(ScannerErrorCode.MISSING_HEX_DIGIT); | 1319 _reportError(ScannerErrorCode.MISSING_HEX_DIGIT); |
1331 } | 1320 } |
1332 _appendStringToken( | 1321 _appendStringToken( |
1333 TokenType.HEXADECIMAL, | 1322 TokenType.HEXADECIMAL, _reader.getString(start, next < 0 ? 0 : -1)); |
1334 _reader.getString(start, next < 0 ? 0 : -1)); | |
1335 return next; | 1323 return next; |
1336 } | 1324 } |
1337 } | 1325 } |
1338 } | 1326 } |
1339 | 1327 |
1340 int _tokenizeHexOrNumber(int next) { | 1328 int _tokenizeHexOrNumber(int next) { |
1341 int x = _reader.peek(); | 1329 int x = _reader.peek(); |
1342 if (x == 0x78 || x == 0x58) { | 1330 if (x == 0x78 || x == 0x58) { |
1343 _reader.advance(); | 1331 _reader.advance(); |
1344 return _tokenizeHex(x); | 1332 return _tokenizeHex(x); |
1345 } | 1333 } |
1346 return _tokenizeNumber(next); | 1334 return _tokenizeNumber(next); |
1347 } | 1335 } |
1348 | 1336 |
1349 int _tokenizeIdentifier(int next, int start, bool allowDollar) { | 1337 int _tokenizeIdentifier(int next, int start, bool allowDollar) { |
1350 while ((0x61 <= next && next <= 0x7A) || | 1338 while ((0x61 <= next && next <= 0x7A) || |
1351 (0x41 <= next && next <= 0x5A) || | 1339 (0x41 <= next && next <= 0x5A) || |
1352 (0x30 <= next && next <= 0x39) || | 1340 (0x30 <= next && next <= 0x39) || |
1353 next == 0x5F || | 1341 next == 0x5F || |
1354 (next == 0x24 && allowDollar)) { | 1342 (next == 0x24 && allowDollar)) { |
1355 next = _reader.advance(); | 1343 next = _reader.advance(); |
1356 } | 1344 } |
1357 _appendStringToken( | 1345 _appendStringToken( |
1358 TokenType.IDENTIFIER, | 1346 TokenType.IDENTIFIER, _reader.getString(start, next < 0 ? 0 : -1)); |
1359 _reader.getString(start, next < 0 ? 0 : -1)); | |
1360 return next; | 1347 return next; |
1361 } | 1348 } |
1362 | 1349 |
1363 int _tokenizeInterpolatedExpression(int next, int start) { | 1350 int _tokenizeInterpolatedExpression(int next, int start) { |
1364 _appendBeginToken(TokenType.STRING_INTERPOLATION_EXPRESSION); | 1351 _appendBeginToken(TokenType.STRING_INTERPOLATION_EXPRESSION); |
1365 next = _reader.advance(); | 1352 next = _reader.advance(); |
1366 while (next != -1) { | 1353 while (next != -1) { |
1367 if (next == 0x7D) { | 1354 if (next == 0x7D) { |
1368 BeginToken begin = | 1355 BeginToken begin = |
1369 _findTokenMatchingClosingBraceInInterpolationExpression(); | 1356 _findTokenMatchingClosingBraceInInterpolationExpression(); |
1370 if (begin == null) { | 1357 if (begin == null) { |
1371 _beginToken(); | 1358 _beginToken(); |
1372 _appendTokenOfType(TokenType.CLOSE_CURLY_BRACKET); | 1359 _appendTokenOfType(TokenType.CLOSE_CURLY_BRACKET); |
1373 next = _reader.advance(); | 1360 next = _reader.advance(); |
1374 _beginToken(); | 1361 _beginToken(); |
1375 return next; | 1362 return next; |
1376 } else if (begin.type == TokenType.OPEN_CURLY_BRACKET) { | 1363 } else if (begin.type == TokenType.OPEN_CURLY_BRACKET) { |
1377 _beginToken(); | 1364 _beginToken(); |
1378 _appendEndToken( | 1365 _appendEndToken( |
1379 TokenType.CLOSE_CURLY_BRACKET, | 1366 TokenType.CLOSE_CURLY_BRACKET, TokenType.OPEN_CURLY_BRACKET); |
1380 TokenType.OPEN_CURLY_BRACKET); | |
1381 next = _reader.advance(); | 1367 next = _reader.advance(); |
1382 _beginToken(); | 1368 _beginToken(); |
1383 } else if (begin.type == TokenType.STRING_INTERPOLATION_EXPRESSION) { | 1369 } else if (begin.type == TokenType.STRING_INTERPOLATION_EXPRESSION) { |
1384 _beginToken(); | 1370 _beginToken(); |
1385 _appendEndToken( | 1371 _appendEndToken(TokenType.CLOSE_CURLY_BRACKET, |
1386 TokenType.CLOSE_CURLY_BRACKET, | |
1387 TokenType.STRING_INTERPOLATION_EXPRESSION); | 1372 TokenType.STRING_INTERPOLATION_EXPRESSION); |
1388 next = _reader.advance(); | 1373 next = _reader.advance(); |
1389 _beginToken(); | 1374 _beginToken(); |
1390 return next; | 1375 return next; |
1391 } | 1376 } |
1392 } else { | 1377 } else { |
1393 next = bigSwitch(next); | 1378 next = bigSwitch(next); |
1394 } | 1379 } |
1395 } | 1380 } |
1396 return next; | 1381 return next; |
1397 } | 1382 } |
1398 | 1383 |
1399 int _tokenizeInterpolatedIdentifier(int next, int start) { | 1384 int _tokenizeInterpolatedIdentifier(int next, int start) { |
1400 _appendStringTokenWithOffset( | 1385 _appendStringTokenWithOffset( |
1401 TokenType.STRING_INTERPOLATION_IDENTIFIER, | 1386 TokenType.STRING_INTERPOLATION_IDENTIFIER, "\$", 0); |
1402 "\$", | |
1403 0); | |
1404 if ((0x41 <= next && next <= 0x5A) || | 1387 if ((0x41 <= next && next <= 0x5A) || |
1405 (0x61 <= next && next <= 0x7A) || | 1388 (0x61 <= next && next <= 0x7A) || |
1406 next == 0x5F) { | 1389 next == 0x5F) { |
1407 _beginToken(); | 1390 _beginToken(); |
1408 next = _tokenizeKeywordOrIdentifier(next, false); | 1391 next = _tokenizeKeywordOrIdentifier(next, false); |
1409 } | 1392 } |
1410 _beginToken(); | 1393 _beginToken(); |
1411 return next; | 1394 return next; |
1412 } | 1395 } |
1413 | 1396 |
(...skipping 49 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... |
1463 } | 1446 } |
1464 } | 1447 } |
1465 | 1448 |
1466 int _tokenizeMultiLineComment(int next) { | 1449 int _tokenizeMultiLineComment(int next) { |
1467 int nesting = 1; | 1450 int nesting = 1; |
1468 next = _reader.advance(); | 1451 next = _reader.advance(); |
1469 while (true) { | 1452 while (true) { |
1470 if (-1 == next) { | 1453 if (-1 == next) { |
1471 _reportError(ScannerErrorCode.UNTERMINATED_MULTI_LINE_COMMENT); | 1454 _reportError(ScannerErrorCode.UNTERMINATED_MULTI_LINE_COMMENT); |
1472 _appendCommentToken( | 1455 _appendCommentToken( |
1473 TokenType.MULTI_LINE_COMMENT, | 1456 TokenType.MULTI_LINE_COMMENT, _reader.getString(_tokenStart, 0)); |
1474 _reader.getString(_tokenStart, 0)); | |
1475 return next; | 1457 return next; |
1476 } else if (0x2A == next) { | 1458 } else if (0x2A == next) { |
1477 next = _reader.advance(); | 1459 next = _reader.advance(); |
1478 if (0x2F == next) { | 1460 if (0x2F == next) { |
1479 --nesting; | 1461 --nesting; |
1480 if (0 == nesting) { | 1462 if (0 == nesting) { |
1481 _appendCommentToken( | 1463 _appendCommentToken(TokenType.MULTI_LINE_COMMENT, |
1482 TokenType.MULTI_LINE_COMMENT, | |
1483 _reader.getString(_tokenStart, 0)); | 1464 _reader.getString(_tokenStart, 0)); |
1484 return _reader.advance(); | 1465 return _reader.advance(); |
1485 } else { | 1466 } else { |
1486 next = _reader.advance(); | 1467 next = _reader.advance(); |
1487 } | 1468 } |
1488 } | 1469 } |
1489 } else if (0x2F == next) { | 1470 } else if (0x2F == next) { |
1490 next = _reader.advance(); | 1471 next = _reader.advance(); |
1491 if (0x2A == next) { | 1472 if (0x2A == next) { |
1492 next = _reader.advance(); | 1473 next = _reader.advance(); |
(...skipping 117 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... |
1610 while (true) { | 1591 while (true) { |
1611 next = _reader.advance(); | 1592 next = _reader.advance(); |
1612 if (0x30 <= next && next <= 0x39) { | 1593 if (0x30 <= next && next <= 0x39) { |
1613 continue; | 1594 continue; |
1614 } else if (next == 0x2E) { | 1595 } else if (next == 0x2E) { |
1615 return _tokenizeFractionPart(_reader.advance(), start); | 1596 return _tokenizeFractionPart(_reader.advance(), start); |
1616 } else if (next == 0x65 || next == 0x45) { | 1597 } else if (next == 0x65 || next == 0x45) { |
1617 return _tokenizeFractionPart(next, start); | 1598 return _tokenizeFractionPart(next, start); |
1618 } else { | 1599 } else { |
1619 _appendStringToken( | 1600 _appendStringToken( |
1620 TokenType.INT, | 1601 TokenType.INT, _reader.getString(start, next < 0 ? 0 : -1)); |
1621 _reader.getString(start, next < 0 ? 0 : -1)); | |
1622 return next; | 1602 return next; |
1623 } | 1603 } |
1624 } | 1604 } |
1625 } | 1605 } |
1626 | 1606 |
1627 int _tokenizeOpenSquareBracket(int next) { | 1607 int _tokenizeOpenSquareBracket(int next) { |
1628 // [ [] []= | 1608 // [ [] []= |
1629 next = _reader.advance(); | 1609 next = _reader.advance(); |
1630 if (next == 0x5D) { | 1610 if (next == 0x5D) { |
1631 return _select(0x3D, TokenType.INDEX_EQ, TokenType.INDEX); | 1611 return _select(0x3D, TokenType.INDEX_EQ, TokenType.INDEX); |
(...skipping 19 matching lines...) Expand all Loading... |
1651 _appendTokenOfType(TokenType.PLUS); | 1631 _appendTokenOfType(TokenType.PLUS); |
1652 return next; | 1632 return next; |
1653 } | 1633 } |
1654 } | 1634 } |
1655 | 1635 |
1656 int _tokenizeSingleLineComment(int next) { | 1636 int _tokenizeSingleLineComment(int next) { |
1657 while (true) { | 1637 while (true) { |
1658 next = _reader.advance(); | 1638 next = _reader.advance(); |
1659 if (-1 == next) { | 1639 if (-1 == next) { |
1660 _appendCommentToken( | 1640 _appendCommentToken( |
1661 TokenType.SINGLE_LINE_COMMENT, | 1641 TokenType.SINGLE_LINE_COMMENT, _reader.getString(_tokenStart, 0)); |
1662 _reader.getString(_tokenStart, 0)); | |
1663 return next; | 1642 return next; |
1664 } else if (0xA == next || 0xD == next) { | 1643 } else if (0xA == next || 0xD == next) { |
1665 _appendCommentToken( | 1644 _appendCommentToken( |
1666 TokenType.SINGLE_LINE_COMMENT, | 1645 TokenType.SINGLE_LINE_COMMENT, _reader.getString(_tokenStart, -1)); |
1667 _reader.getString(_tokenStart, -1)); | |
1668 return next; | 1646 return next; |
1669 } | 1647 } |
1670 } | 1648 } |
1671 } | 1649 } |
1672 | 1650 |
1673 int _tokenizeSingleLineRawString(int next, int quoteChar, int start) { | 1651 int _tokenizeSingleLineRawString(int next, int quoteChar, int start) { |
1674 next = _reader.advance(); | 1652 next = _reader.advance(); |
1675 while (next != -1) { | 1653 while (next != -1) { |
1676 if (next == quoteChar) { | 1654 if (next == quoteChar) { |
1677 _appendStringToken(TokenType.STRING, _reader.getString(start, 0)); | 1655 _appendStringToken(TokenType.STRING, _reader.getString(start, 0)); |
(...skipping 85 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... |
1763 } | 1741 } |
1764 | 1742 |
1765 int _tokenizeTag(int next) { | 1743 int _tokenizeTag(int next) { |
1766 // # or #!.*[\n\r] | 1744 // # or #!.*[\n\r] |
1767 if (_reader.offset == 0) { | 1745 if (_reader.offset == 0) { |
1768 if (_reader.peek() == 0x21) { | 1746 if (_reader.peek() == 0x21) { |
1769 do { | 1747 do { |
1770 next = _reader.advance(); | 1748 next = _reader.advance(); |
1771 } while (next != 0xA && next != 0xD && next > 0); | 1749 } while (next != 0xA && next != 0xD && next > 0); |
1772 _appendStringToken( | 1750 _appendStringToken( |
1773 TokenType.SCRIPT_TAG, | 1751 TokenType.SCRIPT_TAG, _reader.getString(_tokenStart, 0)); |
1774 _reader.getString(_tokenStart, 0)); | |
1775 return next; | 1752 return next; |
1776 } | 1753 } |
1777 } | 1754 } |
1778 _appendTokenOfType(TokenType.HASH); | 1755 _appendTokenOfType(TokenType.HASH); |
1779 return _reader.advance(); | 1756 return _reader.advance(); |
1780 } | 1757 } |
1781 | 1758 |
1782 int _tokenizeTilde(int next) { | 1759 int _tokenizeTilde(int next) { |
1783 // ~ ~/ ~/= | 1760 // ~ ~/ ~/= |
1784 next = _reader.advance(); | 1761 next = _reader.advance(); |
(...skipping 24 matching lines...) Expand all Loading... |
1809 | 1786 |
1810 static const ScannerErrorCode MISSING_DIGIT = | 1787 static const ScannerErrorCode MISSING_DIGIT = |
1811 const ScannerErrorCode('MISSING_DIGIT', "Decimal digit expected"); | 1788 const ScannerErrorCode('MISSING_DIGIT', "Decimal digit expected"); |
1812 | 1789 |
1813 static const ScannerErrorCode MISSING_HEX_DIGIT = | 1790 static const ScannerErrorCode MISSING_HEX_DIGIT = |
1814 const ScannerErrorCode('MISSING_HEX_DIGIT', "Hexidecimal digit expected"); | 1791 const ScannerErrorCode('MISSING_HEX_DIGIT', "Hexidecimal digit expected"); |
1815 | 1792 |
1816 static const ScannerErrorCode MISSING_QUOTE = | 1793 static const ScannerErrorCode MISSING_QUOTE = |
1817 const ScannerErrorCode('MISSING_QUOTE', "Expected quote (' or \")"); | 1794 const ScannerErrorCode('MISSING_QUOTE', "Expected quote (' or \")"); |
1818 | 1795 |
1819 static const ScannerErrorCode UNABLE_GET_CONTENT = | 1796 static const ScannerErrorCode UNABLE_GET_CONTENT = const ScannerErrorCode( |
1820 const ScannerErrorCode('UNABLE_GET_CONTENT', "Unable to get content: {0}")
; | 1797 'UNABLE_GET_CONTENT', "Unable to get content: {0}"); |
1821 | 1798 |
1822 static const ScannerErrorCode UNTERMINATED_MULTI_LINE_COMMENT = | 1799 static const ScannerErrorCode UNTERMINATED_MULTI_LINE_COMMENT = |
1823 const ScannerErrorCode( | 1800 const ScannerErrorCode( |
1824 'UNTERMINATED_MULTI_LINE_COMMENT', | 1801 'UNTERMINATED_MULTI_LINE_COMMENT', "Unterminated multi-line comment"); |
1825 "Unterminated multi-line comment"); | |
1826 | 1802 |
1827 static const ScannerErrorCode UNTERMINATED_STRING_LITERAL = | 1803 static const ScannerErrorCode UNTERMINATED_STRING_LITERAL = |
1828 const ScannerErrorCode( | 1804 const ScannerErrorCode( |
1829 'UNTERMINATED_STRING_LITERAL', | 1805 'UNTERMINATED_STRING_LITERAL', "Unterminated string literal"); |
1830 "Unterminated string literal"); | |
1831 | 1806 |
1832 /** | 1807 /** |
1833 * Initialize a newly created error code to have the given [name]. The message | 1808 * Initialize a newly created error code to have the given [name]. The message |
1834 * associated with the error will be created from the given [message] | 1809 * associated with the error will be created from the given [message] |
1835 * template. The correction associated with the error will be created from the | 1810 * template. The correction associated with the error will be created from the |
1836 * given [correction] template. | 1811 * given [correction] template. |
1837 */ | 1812 */ |
1838 const ScannerErrorCode(String name, String message, [String correction]) | 1813 const ScannerErrorCode(String name, String message, [String correction]) |
1839 : super(name, message, correction); | 1814 : super(name, message, correction); |
1840 | 1815 |
(...skipping 38 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... |
  /**
   * The first comment in the list of comments that precede this token.
   */
  CommentToken _precedingComment;

  /**
   * Initialize a newly created token to have the given [type] at the given
   * [offset] and to be preceded by the comments reachable from the given
   * comment token.
   */
  StringTokenWithComment(
      TokenType type, String value, int offset, this._precedingComment)
      : super(type, value, offset) {
    // Point each preceding comment back at this token (via _setCommentParent).
    _setCommentParent(_precedingComment);
  }
1894 | 1869 |
  /**
   * Return the first comment in the list of comments that precede this token.
   */
  CommentToken get precedingComments => _precedingComment;

  /**
   * Set the comments preceding this token to those reachable from [comment],
   * updating their parent via [_setCommentParent].
   */
  void set precedingComments(CommentToken comment) {
    _precedingComment = comment;
    _setCommentParent(_precedingComment);
  }
1901 | 1876 |
  @override
  void applyDelta(int delta) {
    super.applyDelta(delta);
    // Also shift the offsets of every comment token that precedes this token,
    // since they are chained off this token rather than the main token stream.
    Token token = precedingComments;
    while (token != null) {
      token.applyDelta(delta);
      token = token.next;
    }
  }
1911 | 1886 |
  /**
   * Return a copy of this token, including copies of the preceding comments
   * (made via [copyComments]).
   */
  @override
  Token copy() => new StringTokenWithComment(
      type, lexeme, offset, copyComments(precedingComments));
1919 } | 1890 } |
1920 | 1891 |
1921 /** | 1892 /** |
1922 * A `SubSequenceReader` is a [CharacterReader] that reads characters from a | 1893 * A `SubSequenceReader` is a [CharacterReader] that reads characters from a |
1923 * character sequence, but adds a delta when reporting the current character | 1894 * character sequence, but adds a delta when reporting the current character |
1924 * offset so that the character sequence can be a subsequence from a larger | 1895 * offset so that the character sequence can be a subsequence from a larger |
1925 * sequence. | 1896 * sequence. |
1926 */ | 1897 */ |
1927 class SubSequenceReader extends CharSequenceReader { | 1898 class SubSequenceReader extends CharSequenceReader { |
1928 /** | 1899 /** |
(...skipping 370 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... |
  static const TokenType SCRIPT_TAG = const TokenType('SCRIPT_TAG');

  static const TokenType SINGLE_LINE_COMMENT =
      const TokenType('SINGLE_LINE_COMMENT');

  static const TokenType STRING = const TokenType('STRING');

  // Operator and punctuation token types: each carries the TokenClass that
  // determines its precedence, plus its fixed lexeme.
  static const TokenType AMPERSAND =
      const TokenType('AMPERSAND', TokenClass.BITWISE_AND_OPERATOR, "&");

  static const TokenType AMPERSAND_AMPERSAND = const TokenType(
      'AMPERSAND_AMPERSAND', TokenClass.LOGICAL_AND_OPERATOR, "&&");

  static const TokenType AMPERSAND_EQ =
      const TokenType('AMPERSAND_EQ', TokenClass.ASSIGNMENT_OPERATOR, "&=");

  static const TokenType AT = const TokenType('AT', TokenClass.NO_CLASS, "@");

  static const TokenType BANG =
      const TokenType('BANG', TokenClass.UNARY_PREFIX_OPERATOR, "!");
2319 | 2290 |
2320 static const TokenType BANG_EQ = | 2291 static const TokenType BANG_EQ = |
(...skipping 82 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... |
2403 | 2374 |
  static const TokenType MINUS_MINUS =
      const TokenType('MINUS_MINUS', TokenClass.UNARY_PREFIX_OPERATOR, "--");

  static const TokenType OPEN_CURLY_BRACKET =
      const TokenType('OPEN_CURLY_BRACKET', TokenClass.NO_CLASS, "{");

  // '(' , '[' and '.' carry UNARY_POSTFIX precedence (invocation, index and
  // selector forms); note that isOperator deliberately excludes these three.
  static const TokenType OPEN_PAREN =
      const TokenType('OPEN_PAREN', TokenClass.UNARY_POSTFIX_OPERATOR, "(");

  static const TokenType OPEN_SQUARE_BRACKET = const TokenType(
      'OPEN_SQUARE_BRACKET', TokenClass.UNARY_POSTFIX_OPERATOR, "[");

  static const TokenType PERCENT =
      const TokenType('PERCENT', TokenClass.MULTIPLICATIVE_OPERATOR, "%");

  static const TokenType PERCENT_EQ =
      const TokenType('PERCENT_EQ', TokenClass.ASSIGNMENT_OPERATOR, "%=");

  static const TokenType PERIOD =
      const TokenType('PERIOD', TokenClass.UNARY_POSTFIX_OPERATOR, ".");
2424 | 2395 |
(...skipping 20 matching lines...) Expand all Loading... |
2445 | 2416 |
  static const TokenType SLASH_EQ =
      const TokenType('SLASH_EQ', TokenClass.ASSIGNMENT_OPERATOR, "/=");

  static const TokenType STAR =
      const TokenType('STAR', TokenClass.MULTIPLICATIVE_OPERATOR, "*");

  static const TokenType STAR_EQ =
      const TokenType('STAR_EQ', TokenClass.ASSIGNMENT_OPERATOR, "*=");

  // The "\$" escapes below are literal '$' characters in the lexemes
  // ("${" and "$"), escaped because '$' starts interpolation in Dart strings.
  static const TokenType STRING_INTERPOLATION_EXPRESSION = const TokenType(
      'STRING_INTERPOLATION_EXPRESSION', TokenClass.NO_CLASS, "\${");

  static const TokenType STRING_INTERPOLATION_IDENTIFIER = const TokenType(
      'STRING_INTERPOLATION_IDENTIFIER', TokenClass.NO_CLASS, "\$");

  static const TokenType TILDE =
      const TokenType('TILDE', TokenClass.UNARY_PREFIX_OPERATOR, "~");

  static const TokenType TILDE_SLASH =
      const TokenType('TILDE_SLASH', TokenClass.MULTIPLICATIVE_OPERATOR, "~/");

  static const TokenType TILDE_SLASH_EQ =
      const TokenType('TILDE_SLASH_EQ', TokenClass.ASSIGNMENT_OPERATOR, "~/=");
2469 | 2440 |
(...skipping 15 matching lines...) Expand all Loading... |
2485 * The name of the token type. | 2456 * The name of the token type. |
2486 */ | 2457 */ |
2487 final String name; | 2458 final String name; |
2488 | 2459 |
  /**
   * The lexeme that defines this type of token, or `null` if there is more than
   * one possible lexeme for this type of token.
   */
  final String lexeme;

  /**
   * Initialize a newly created token type with the given [name], and optionally
   * the token class (used for precedence, defaulting to [TokenClass.NO_CLASS])
   * and the fixed [lexeme].
   */
  const TokenType(this.name,
      [this._tokenClass = TokenClass.NO_CLASS, this.lexeme = null]);
2497 | 2468 |
2498 /** | 2469 /** |
2499 * Return `true` if this type of token represents an additive operator. | 2470 * Return `true` if this type of token represents an additive operator. |
2500 */ | 2471 */ |
2501 bool get isAdditiveOperator => _tokenClass == TokenClass.ADDITIVE_OPERATOR; | 2472 bool get isAdditiveOperator => _tokenClass == TokenClass.ADDITIVE_OPERATOR; |
2502 | 2473 |
2503 /** | 2474 /** |
2504 * Return `true` if this type of token represents an assignment operator. | 2475 * Return `true` if this type of token represents an assignment operator. |
2505 */ | 2476 */ |
2506 bool get isAssignmentOperator => | 2477 bool get isAssignmentOperator => |
2507 _tokenClass == TokenClass.ASSIGNMENT_OPERATOR; | 2478 _tokenClass == TokenClass.ASSIGNMENT_OPERATOR; |
2508 | 2479 |
2509 /** | 2480 /** |
2510 * Return `true` if this type of token represents an associative operator. An | 2481 * Return `true` if this type of token represents an associative operator. An |
2511 * associative operator is an operator for which the following equality is | 2482 * associative operator is an operator for which the following equality is |
2512 * true: `(a * b) * c == a * (b * c)`. In other words, if the result of | 2483 * true: `(a * b) * c == a * (b * c)`. In other words, if the result of |
2513 * applying the operator to multiple operands does not depend on the order in | 2484 * applying the operator to multiple operands does not depend on the order in |
2514 * which those applications occur. | 2485 * which those applications occur. |
2515 * | 2486 * |
2516 * Note: This method considers the logical-and and logical-or operators to be | 2487 * Note: This method considers the logical-and and logical-or operators to be |
2517 * associative, even though the order in which the application of those | 2488 * associative, even though the order in which the application of those |
2518 * operators can have an effect because evaluation of the right-hand operand | 2489 * operators can have an effect because evaluation of the right-hand operand |
2519 * is conditional. | 2490 * is conditional. |
2520 */ | 2491 */ |
2521 bool get isAssociativeOperator => | 2492 bool get isAssociativeOperator => this == AMPERSAND || |
2522 this == AMPERSAND || | 2493 this == AMPERSAND_AMPERSAND || |
2523 this == AMPERSAND_AMPERSAND || | 2494 this == BAR || |
2524 this == BAR || | 2495 this == BAR_BAR || |
2525 this == BAR_BAR || | 2496 this == CARET || |
2526 this == CARET || | 2497 this == PLUS || |
2527 this == PLUS || | 2498 this == STAR; |
2528 this == STAR; | |
2529 | 2499 |
2530 /** | 2500 /** |
2531 * Return `true` if this type of token represents an equality operator. | 2501 * Return `true` if this type of token represents an equality operator. |
2532 */ | 2502 */ |
2533 bool get isEqualityOperator => _tokenClass == TokenClass.EQUALITY_OPERATOR; | 2503 bool get isEqualityOperator => _tokenClass == TokenClass.EQUALITY_OPERATOR; |
2534 | 2504 |
2535 /** | 2505 /** |
2536 * Return `true` if this type of token represents an increment operator. | 2506 * Return `true` if this type of token represents an increment operator. |
2537 */ | 2507 */ |
2538 bool get isIncrementOperator => | 2508 bool get isIncrementOperator => |
2539 identical(lexeme, "++") || identical(lexeme, "--"); | 2509 identical(lexeme, "++") || identical(lexeme, "--"); |
2540 | 2510 |
2541 /** | 2511 /** |
2542 * Return `true` if this type of token represents a multiplicative operator. | 2512 * Return `true` if this type of token represents a multiplicative operator. |
2543 */ | 2513 */ |
2544 bool get isMultiplicativeOperator => | 2514 bool get isMultiplicativeOperator => |
2545 _tokenClass == TokenClass.MULTIPLICATIVE_OPERATOR; | 2515 _tokenClass == TokenClass.MULTIPLICATIVE_OPERATOR; |
2546 | 2516 |
  /**
   * Return `true` if this token type represents an operator.
   *
   * Note that `(`, `[` and `.` are excluded even though they have a token
   * class: they are classified (as unary postfix) only for precedence purposes.
   */
  bool get isOperator => _tokenClass != TokenClass.NO_CLASS &&
      this != OPEN_PAREN &&
      this != OPEN_SQUARE_BRACKET &&
      this != PERIOD;

  /**
   * Return `true` if this type of token represents a relational operator.
   */
  bool get isRelationalOperator =>
      _tokenClass == TokenClass.RELATIONAL_OPERATOR;

  /**
   * Return `true` if this type of token represents a shift operator.
   */
  bool get isShiftOperator => _tokenClass == TokenClass.SHIFT_OPERATOR;

  /**
   * Return `true` if this type of token represents a unary postfix operator.
   */
  bool get isUnaryPostfixOperator =>
      _tokenClass == TokenClass.UNARY_POSTFIX_OPERATOR;

  /**
   * Return `true` if this type of token represents a unary prefix operator.
   */
  bool get isUnaryPrefixOperator =>
      _tokenClass == TokenClass.UNARY_PREFIX_OPERATOR;
2578 | 2547 |
2579 /** | 2548 /** |
2580 * Return `true` if this token type represents an operator that can be defined | 2549 * Return `true` if this token type represents an operator that can be defined |
2581 * by users. | 2550 * by users. |
2582 */ | 2551 */ |
2583 bool get isUserDefinableOperator => | 2552 bool get isUserDefinableOperator => identical(lexeme, "==") || |
2584 identical(lexeme, "==") || | 2553 identical(lexeme, "~") || |
2585 identical(lexeme, "~") || | 2554 identical(lexeme, "[]") || |
2586 identical(lexeme, "[]") || | 2555 identical(lexeme, "[]=") || |
2587 identical(lexeme, "[]=") || | 2556 identical(lexeme, "*") || |
2588 identical(lexeme, "*") || | 2557 identical(lexeme, "/") || |
2589 identical(lexeme, "/") || | 2558 identical(lexeme, "%") || |
2590 identical(lexeme, "%") || | 2559 identical(lexeme, "~/") || |
2591 identical(lexeme, "~/") || | 2560 identical(lexeme, "+") || |
2592 identical(lexeme, "+") || | 2561 identical(lexeme, "-") || |
2593 identical(lexeme, "-") || | 2562 identical(lexeme, "<<") || |
2594 identical(lexeme, "<<") || | 2563 identical(lexeme, ">>") || |
2595 identical(lexeme, ">>") || | 2564 identical(lexeme, ">=") || |
2596 identical(lexeme, ">=") || | 2565 identical(lexeme, ">") || |
2597 identical(lexeme, ">") || | 2566 identical(lexeme, "<=") || |
2598 identical(lexeme, "<=") || | 2567 identical(lexeme, "<") || |
2599 identical(lexeme, "<") || | 2568 identical(lexeme, "&") || |
2600 identical(lexeme, "&") || | 2569 identical(lexeme, "^") || |
2601 identical(lexeme, "^") || | 2570 identical(lexeme, "|"); |
2602 identical(lexeme, "|"); | |
2603 | 2571 |
2604 /** | 2572 /** |
2605 * Return the precedence of the token, or `0` if the token does not represent | 2573 * Return the precedence of the token, or `0` if the token does not represent |
2606 * an operator. | 2574 * an operator. |
2607 */ | 2575 */ |
2608 int get precedence => _tokenClass.precedence; | 2576 int get precedence => _tokenClass.precedence; |
2609 | 2577 |
2610 @override | 2578 @override |
2611 String toString() => name; | 2579 String toString() => name; |
2612 } | 2580 } |
(...skipping 27 matching lines...) Expand all Loading... |
2640 CommentToken get precedingComments => _precedingComment; | 2608 CommentToken get precedingComments => _precedingComment; |
2641 | 2609 |
2642 void set precedingComments(CommentToken comment) { | 2610 void set precedingComments(CommentToken comment) { |
2643 _precedingComment = comment; | 2611 _precedingComment = comment; |
2644 _setCommentParent(_precedingComment); | 2612 _setCommentParent(_precedingComment); |
2645 } | 2613 } |
2646 | 2614 |
2647 @override | 2615 @override |
2648 Token copy() => new TokenWithComment(type, offset, precedingComments); | 2616 Token copy() => new TokenWithComment(type, offset, precedingComments); |
2649 } | 2617 } |
OLD | NEW |