Chromium Code Reviews
chromiumcodereview-hr@appspot.gserviceaccount.com (chromiumcodereview-hr) | Please choose your nickname with Settings | Help | Chromium Project | Gerrit Changes | Sign out
(230)

Side by Side Diff: dart/pkg/analyzer/lib/src/generated/scanner.dart

Issue 56933002: Version 0.8.10.1 (Closed) Base URL: http://dart.googlecode.com/svn/trunk/
Patch Set: Created 7 years, 1 month ago
Use n/p to move between diff chunks; N/P to move between comments. Draft comments are only viewable by you.
Jump to:
View unified diff | Download patch | Annotate | Revision Log
« no previous file with comments | « dart/pkg/analyzer/lib/src/generated/resolver.dart ('k') | dart/pkg/analyzer/pubspec.yaml » ('j') | no next file with comments »
Toggle Intra-line Diffs ('i') | Expand Comments ('e') | Collapse Comments ('c') | Show Comments Hide Comments ('s')
OLDNEW
1 // This code was auto-generated, is not intended to be edited, and is subject to 1 // This code was auto-generated, is not intended to be edited, and is subject to
2 // significant change. Please see the README file for more information. 2 // significant change. Please see the README file for more information.
3 library engine.scanner; 3 library engine.scanner;
4 import 'dart:collection'; 4 import 'dart:collection';
5 import 'java_core.dart'; 5 import 'java_core.dart';
6 import 'java_engine.dart'; 6 import 'java_engine.dart';
7 import 'source.dart'; 7 import 'source.dart';
8 import 'error.dart'; 8 import 'error.dart';
9 import 'instrumentation.dart'; 9 import 'instrumentation.dart';
10 import 'utilities_collection.dart' show TokenMap; 10 import 'utilities_collection.dart' show TokenMap;
(...skipping 119 matching lines...) Expand 10 before | Expand all | Expand 10 after
130 */ 130 */
/**
 * Return the state that follows this state when the given character is
 * consumed, as recorded in the transition table.
 *
 * @param c the code unit of the next character (assumed to be a lowercase
 *          ASCII letter — TODO confirm against callers)
 */
KeywordState next(int c) {
  // 0x61 is the code unit of 'a'; the table is indexed from 'a'.
  return _table[c - 0x61];
}
132 } 132 }
133 /** 133 /**
134 * The enumeration `ScannerErrorCode` defines the error codes used for errors de tected by the 134 * The enumeration `ScannerErrorCode` defines the error codes used for errors de tected by the
135 * scanner. 135 * scanner.
136 * 136 *
137 * @coverage dart.engine.parser 137 * @coverage dart.engine.parser
138 */ 138 */
139 class ScannerErrorCode extends Enum<ScannerErrorCode> implements ErrorCode { 139 class ScannerErrorCode extends Enum<ScannerErrorCode> implements ErrorCode {
140 static final ScannerErrorCode CHARACTER_EXPECTED_AFTER_SLASH = new ScannerErro rCode.con1('CHARACTER_EXPECTED_AFTER_SLASH', 0, "Character expected after slash" ); 140 static final ScannerErrorCode ILLEGAL_CHARACTER = new ScannerErrorCode.con1('I LLEGAL_CHARACTER', 0, "Illegal character %x");
141 static final ScannerErrorCode ILLEGAL_CHARACTER = new ScannerErrorCode.con1('I LLEGAL_CHARACTER', 1, "Illegal character %x"); 141 static final ScannerErrorCode MISSING_DIGIT = new ScannerErrorCode.con1('MISSI NG_DIGIT', 1, "Decimal digit expected");
142 static final ScannerErrorCode MISSING_DIGIT = new ScannerErrorCode.con1('MISSI NG_DIGIT', 2, "Decimal digit expected"); 142 static final ScannerErrorCode MISSING_HEX_DIGIT = new ScannerErrorCode.con1('M ISSING_HEX_DIGIT', 2, "Hexidecimal digit expected");
143 static final ScannerErrorCode MISSING_HEX_DIGIT = new ScannerErrorCode.con1('M ISSING_HEX_DIGIT', 3, "Hexidecimal digit expected"); 143 static final ScannerErrorCode MISSING_QUOTE = new ScannerErrorCode.con1('MISSI NG_QUOTE', 3, "Expected quote (' or \")");
144 static final ScannerErrorCode MISSING_QUOTE = new ScannerErrorCode.con1('MISSI NG_QUOTE', 4, "Expected quote (' or \")"); 144 static final ScannerErrorCode UNTERMINATED_MULTI_LINE_COMMENT = new ScannerErr orCode.con1('UNTERMINATED_MULTI_LINE_COMMENT', 4, "Unterminated multi-line comme nt");
145 static final ScannerErrorCode UNTERMINATED_MULTI_LINE_COMMENT = new ScannerErr orCode.con1('UNTERMINATED_MULTI_LINE_COMMENT', 5, "Unterminated multi-line comme nt"); 145 static final ScannerErrorCode UNTERMINATED_STRING_LITERAL = new ScannerErrorCo de.con1('UNTERMINATED_STRING_LITERAL', 5, "Unterminated string literal");
146 static final ScannerErrorCode UNTERMINATED_STRING_LITERAL = new ScannerErrorCo de.con1('UNTERMINATED_STRING_LITERAL', 6, "Unterminated string literal");
147 static final List<ScannerErrorCode> values = [ 146 static final List<ScannerErrorCode> values = [
148 CHARACTER_EXPECTED_AFTER_SLASH,
149 ILLEGAL_CHARACTER, 147 ILLEGAL_CHARACTER,
150 MISSING_DIGIT, 148 MISSING_DIGIT,
151 MISSING_HEX_DIGIT, 149 MISSING_HEX_DIGIT,
152 MISSING_QUOTE, 150 MISSING_QUOTE,
153 UNTERMINATED_MULTI_LINE_COMMENT, 151 UNTERMINATED_MULTI_LINE_COMMENT,
154 UNTERMINATED_STRING_LITERAL]; 152 UNTERMINATED_STRING_LITERAL];
155 153
156 /** 154 /**
157 * The template used to create the message to be displayed for this error. 155 * The template used to create the message to be displayed for this error.
158 */ 156 */
(...skipping 303 matching lines...) Expand 10 before | Expand all | Expand 10 after
/**
 * Instances of the class `IncrementalScanner` rescan a modified region of a
 * previously scanned source, copying the tokens outside that region so that
 * the original token stream is left unmodified.
 */
class IncrementalScanner extends Scanner {

  /**
   * The reader used to access the characters in the source.
   */
  CharacterReader _reader;

  /**
   * A map from tokens that were copied to the copies of the tokens.
   */
  final TokenMap tokenMap = new TokenMap();

  /**
   * The first token in the range of tokens that are different from the tokens
   * in the original token stream.
   */
  Token _firstToken;

  /**
   * The last token in the range of tokens that are different from the tokens
   * in the original token stream.
   */
  Token lastToken;

  /**
   * Initialize a newly created scanner.
   *
   * @param source the source being scanned
   * @param reader the character reader used to read the characters in the source
   * @param errorListener the error listener that will be informed of any errors that are found
   */
  IncrementalScanner(Source source, CharacterReader reader, AnalysisErrorListener errorListener) : super(source, reader, errorListener) {
    this._reader = reader;
  }

  /**
   * Return the first token in the range of tokens that are different from the
   * tokens in the original token stream or `null` if the new tokens are the
   * same as the original tokens except for offset.
   *
   * @return the first token in the range of new tokens
   */
  Token get firstToken => _firstToken;

  /**
   * Given the stream of tokens scanned from the original source, the modified
   * source (the result of replacing one contiguous range of characters with
   * another string of characters), and a specification of the modification
   * that was made, return a stream of tokens scanned from the modified source.
   * The original stream of tokens will not be modified.
   *
   * @param originalStream the stream of tokens scanned from the original source
   * @param index the index of the first character in both the original and
   *          modified source that was affected by the modification
   * @param removedLength the number of characters removed from the original source
   * @param insertedLength the number of characters added to the modified source
   */
  Token rescan(Token originalStream, int index, int removedLength, int insertedLength) {
    // Copy, unchanged, every original token that ends before the edit.
    while (originalStream.type != TokenType.EOF && originalStream.end < index) {
      originalStream = copyAndAdvance(originalStream, 0);
    }
    Token lastCopied = tail;
    // Compute the last character of the modified source that must be rescanned.
    int modifiedEnd = index + insertedLength - 1;
    if (originalStream.offset < index) {
      // The edit begins inside the current token, so scanning must extend to
      // the (shifted) end of that token.
      modifiedEnd += originalStream.end - index - removedLength;
    }
    // Rescan the modified region.
    _reader.offset = Math.min(originalStream.offset, index) - 1;
    int next = _reader.advance();
    while (next != -1 && _reader.offset <= modifiedEnd) {
      next = bigSwitch(next);
    }
    // Record the range of tokens produced by the rescan.
    _firstToken = lastCopied.next;
    lastToken = tail;
    if (_firstToken == null || identical(_firstToken.type, TokenType.EOF)) {
      // The rescan produced no (non-EOF) tokens.
      _firstToken = null;
      lastToken = null;
    } else if (originalStream.end == index && _firstToken.end == index) {
      // The first rescanned token abuts the edit exactly as the original did;
      // treat it as a copy of the original rather than as a new token.
      tokenMap.put(originalStream, _firstToken);
      if (identical(lastToken, _firstToken)) {
        lastToken = lastToken.next;
      }
      _firstToken = _firstToken.next;
    }
    // Skip the original tokens invalidated by the edit.
    int removedEnd = index + removedLength - 1 + Math.max(0, tail.end - index - insertedLength);
    while (originalStream.offset <= removedEnd) {
      originalStream = originalStream.next;
    }
    // Copy the remaining original tokens, shifting their offsets by the net
    // change in source length.
    int delta = insertedLength - removedLength;
    while (originalStream.type != TokenType.EOF) {
      originalStream = copyAndAdvance(originalStream, delta);
    }
    Token eof = copyAndAdvance(originalStream, delta);
    // Terminate the stream: the EOF token points to itself.
    eof.setNextWithoutSettingPrevious(eof);
    return super.firstToken;
  }

  /**
   * Copy the given token (and its preceding comments), append the copy to the
   * stream being built, record the original-to-copy mappings in [tokenMap],
   * and return the token following the original.
   *
   * @param originalToken the token to be copied
   * @param delta the offset adjustment to apply to the copy
   */
  Token copyAndAdvance(Token originalToken, int delta) {
    Token copiedToken = originalToken.copy();
    tokenMap.put(originalToken, copiedToken);
    copiedToken.applyDelta(delta);
    appendToken(copiedToken);
    Token originalComment = originalToken.precedingComments;
    // BUG FIX: the comment copies must be read from the *copied* token.
    // Reading them from the original token mapped each original comment to
    // itself, defeating the purpose of the token map for comments.
    // (Assumes Token.copy() also copies the preceding comment list, as the
    // parallel advance of the two lists requires — TODO confirm in Token.)
    Token copiedComment = copiedToken.precedingComments;
    while (originalComment != null) {
      tokenMap.put(originalComment, copiedComment);
      originalComment = originalComment.next;
      copiedComment = copiedComment.next;
    }
    return originalToken.next;
  }
}
533 /** 569 /**
534 * The class `Scanner` implements a scanner for Dart code. 570 * The class `Scanner` implements a scanner for Dart code.
535 * 571 *
536 * The lexical structure of Dart is ambiguous without knowledge of the context i n which a token is 572 * The lexical structure of Dart is ambiguous without knowledge of the context i n which a token is
(...skipping 22 matching lines...) Expand all
/**
 * The error listener that will be informed of any errors that are found.
 */
AnalysisErrorListener _errorListener;

/**
 * The token pointing to the head of the linked list of tokens. This is a
 * sentinel EOF token; the scanned stream begins at its `next`.
 */
Token _tokens;

/**
 * The last token that was scanned.
 */
Token tail;

/**
 * The first token in the list of comment tokens found since the last
 * non-comment token.
 */
Token _firstComment;

/**
 * The last token in the list of comment tokens found since the last
 * non-comment token.
 */
Token _lastComment;
(...skipping 30 matching lines...) Expand all
610 * @param source the source being scanned 646 * @param source the source being scanned
611 * @param reader the character reader used to read the characters in the sourc e 647 * @param reader the character reader used to read the characters in the sourc e
612 * @param errorListener the error listener that will be informed of any errors that are found 648 * @param errorListener the error listener that will be informed of any errors that are found
613 */ 649 */
/**
 * Initialize a newly created scanner.
 *
 * @param source the source being scanned
 * @param reader the character reader used to read the characters in the source
 * @param errorListener the error listener that will be informed of any errors
 *          that are found
 */
Scanner(Source source, CharacterReader reader, AnalysisErrorListener errorListener) {
  this.source = source;
  this._reader = reader;
  this._errorListener = errorListener;
  _tokenStart = -1;
  // The first line always begins at offset zero.
  _lineStarts.add(0);
  // Seed the token list with a self-linked sentinel EOF token; the scanned
  // stream will be appended after it.
  _tokens = new Token(TokenType.EOF, -1);
  _tokens.setNext(_tokens);
  tail = _tokens;
}
624 660
625 /** 661 /**
626 * Return an array containing the offsets of the first character of each line in the source code. 662 * Return an array containing the offsets of the first character of each line in the source code.
627 * 663 *
628 * @return an array containing the offsets of the first character of each line in the source code 664 * @return an array containing the offsets of the first character of each line in the source code
629 */ 665 */
630 List<int> get lineStarts => _lineStarts; 666 List<int> get lineStarts => _lineStarts;
(...skipping 36 matching lines...) Expand 10 before | Expand all | Expand 10 after
667 InstrumentationBuilder instrumentation = Instrumentation.builder2("dart.engi ne.AbstractScanner.tokenize"); 703 InstrumentationBuilder instrumentation = Instrumentation.builder2("dart.engi ne.AbstractScanner.tokenize");
668 int tokenCounter = 0; 704 int tokenCounter = 0;
669 try { 705 try {
670 int next = _reader.advance(); 706 int next = _reader.advance();
671 while (next != -1) { 707 while (next != -1) {
672 tokenCounter++; 708 tokenCounter++;
673 next = bigSwitch(next); 709 next = bigSwitch(next);
674 } 710 }
675 appendEofToken(); 711 appendEofToken();
676 instrumentation.metric2("tokensCount", tokenCounter); 712 instrumentation.metric2("tokensCount", tokenCounter);
677 return firstToken(); 713 return firstToken;
678 } finally { 714 } finally {
679 instrumentation.log2(2); 715 instrumentation.log2(2);
680 } 716 }
681 } 717 }
682 718
/**
 * Append the given token to the end of the token stream being scanned. This
 * method is intended to be used by subclasses that copy existing tokens and
 * should not normally be used because it will fail to correctly associate any
 * comments with the token being passed in.
 *
 * @param token the token to be appended
 */
void appendToken(Token token) {
  tail = tail.setNext(token);
}
693 int bigSwitch(int next) { 729 int bigSwitch(int next) {
694 beginToken(); 730 beginToken();
695 if (next == 0xD) { 731 if (next == 0xD) {
696 next = _reader.advance(); 732 next = _reader.advance();
697 if (next == 0xA) { 733 if (next == 0xA) {
698 next = _reader.advance(); 734 next = _reader.advance();
699 } 735 }
700 recordStartOfLine(); 736 recordStartOfLine();
701 return next; 737 return next;
(...skipping 127 matching lines...) Expand 10 before | Expand all | Expand 10 after
829 } 865 }
830 reportError(ScannerErrorCode.ILLEGAL_CHARACTER, [next]); 866 reportError(ScannerErrorCode.ILLEGAL_CHARACTER, [next]);
831 return _reader.advance(); 867 return _reader.advance();
832 } 868 }
833 869
834 /** 870 /**
835 * Return the first token in the token stream that was scanned. 871 * Return the first token in the token stream that was scanned.
836 * 872 *
837 * @return the first token in the token stream that was scanned 873 * @return the first token in the token stream that was scanned
838 */ 874 */
839 Token firstToken() => _tokens.next; 875 Token get firstToken => _tokens.next;
840 876
/**
 * Record the fact that we are at the beginning of a new line in the source,
 * using the reader's current offset as the line start.
 */
void recordStartOfLine() {
  _lineStarts.add(_reader.offset);
}
/**
 * Append a begin token (e.g. an opening bracket) of the given type, attaching
 * any pending comments, and push it on the grouping stack so it can later be
 * matched with its end token.
 *
 * @param type the type of the token to be appended
 */
void appendBeginToken(TokenType type) {
  BeginToken token;
  if (_firstComment != null) {
    token = new BeginTokenWithComment(type, _tokenStart, _firstComment);
    _firstComment = null;
    _lastComment = null;
  } else {
    token = new BeginToken(type, _tokenStart);
  }
  tail = tail.setNext(token);
  _groupingStack.add(token);
  _stackEnd++;
}
/**
 * Append a comment token with the given type and lexeme to the pending list
 * of comments, to be attached to the next non-comment token.
 *
 * @param type the type of the comment token
 * @param value the lexeme of the comment
 */
void appendCommentToken(TokenType type, String value) {
  Token comment = new StringToken(type, value, _tokenStart);
  if (_firstComment == null) {
    _firstComment = comment;
    _lastComment = comment;
  } else {
    _lastComment = _lastComment.setNext(comment);
  }
}
/**
 * Append an end token (e.g. a closing bracket) of the given type, attaching
 * any pending comments. If the top of the grouping stack is a begin token of
 * the matching type, link the pair and pop the stack.
 *
 * @param type the type of the token to be appended
 * @param beginType the type of the begin token this end token should match
 */
void appendEndToken(TokenType type, TokenType beginType) {
  Token token;
  if (_firstComment != null) {
    token = new TokenWithComment(type, _tokenStart, _firstComment);
    _firstComment = null;
    _lastComment = null;
  } else {
    token = new Token(type, _tokenStart);
  }
  tail = tail.setNext(token);
  if (_stackEnd >= 0) {
    BeginToken begin = _groupingStack[_stackEnd];
    if (identical(begin.type, beginType)) {
      begin.endToken = token;
      _groupingStack.removeAt(_stackEnd--);
    }
  }
}
/**
 * Append an EOF token to the end of the token stream, attaching any pending
 * comments. If any begin tokens remain unmatched on the grouping stack,
 * record that fact.
 */
void appendEofToken() {
  int offset = _reader.offset + 1;
  Token eofToken;
  if (_firstComment != null) {
    eofToken = new TokenWithComment(TokenType.EOF, offset, _firstComment);
    _firstComment = null;
    _lastComment = null;
  } else {
    eofToken = new Token(TokenType.EOF, offset);
  }
  // The EOF token terminates the stream by pointing to itself.
  eofToken.setNext(eofToken);
  tail = tail.setNext(eofToken);
  if (_stackEnd >= 0) {
    // There are unclosed begin tokens left on the grouping stack.
    _hasUnmatchedGroups2 = true;
  }
}
/**
 * Append a keyword token for the given keyword, attaching any pending
 * comments.
 *
 * @param keyword the keyword that was scanned
 */
void appendKeywordToken(Keyword keyword) {
  if (_firstComment != null) {
    tail = tail.setNext(new KeywordTokenWithComment(keyword, _tokenStart, _firstComment));
    _firstComment = null;
    _lastComment = null;
  } else {
    tail = tail.setNext(new KeywordToken(keyword, _tokenStart));
  }
}
/**
 * Append a string token with the given type and lexeme, attaching any pending
 * comments.
 *
 * @param type the type of the token
 * @param value the lexeme of the token
 */
void appendStringToken(TokenType type, String value) {
  if (_firstComment != null) {
    tail = tail.setNext(new StringTokenWithComment(type, value, _tokenStart, _firstComment));
    _firstComment = null;
    _lastComment = null;
  } else {
    tail = tail.setNext(new StringToken(type, value, _tokenStart));
  }
}
/**
 * Append a string token with the given type and lexeme whose offset is the
 * current token start shifted by the given amount, attaching any pending
 * comments.
 *
 * @param type the type of the token
 * @param value the lexeme of the token
 * @param offset the amount to add to the recorded token start
 */
void appendStringToken2(TokenType type, String value, int offset) {
  int start = _tokenStart + offset;
  if (_firstComment != null) {
    tail = tail.setNext(new StringTokenWithComment(type, value, start, _firstComment));
    _firstComment = null;
    _lastComment = null;
  } else {
    tail = tail.setNext(new StringToken(type, value, start));
  }
}
/**
 * Append a simple token of the given type at the current token start,
 * attaching any pending comments.
 *
 * @param type the type of the token to be appended
 */
void appendToken2(TokenType type) {
  if (_firstComment != null) {
    tail = tail.setNext(new TokenWithComment(type, _tokenStart, _firstComment));
    _firstComment = null;
    _lastComment = null;
  } else {
    tail = tail.setNext(new Token(type, _tokenStart));
  }
}
/**
 * Append a simple token of the given type at the given offset, attaching any
 * pending comments.
 *
 * @param type the type of the token to be appended
 * @param offset the offset at which the token begins
 */
void appendToken3(TokenType type, int offset) {
  if (_firstComment != null) {
    tail = tail.setNext(new TokenWithComment(type, offset, _firstComment));
    _firstComment = null;
    _lastComment = null;
  } else {
    tail = tail.setNext(new Token(type, offset));
  }
}
/**
 * Mark the reader's current offset as the start of the next token.
 */
void beginToken() {
  _tokenStart = _reader.offset;
}
949 985
950 /** 986 /**
951 * Return the beginning token corresponding to a closing brace that was found while scanning 987 * Return the beginning token corresponding to a closing brace that was found while scanning
(...skipping 378 matching lines...) Expand 10 before | Expand all | Expand 10 after
1330 return _reader.advance(); 1366 return _reader.advance();
1331 } 1367 }
1332 } 1368 }
1333 continue; 1369 continue;
1334 } 1370 }
1335 if (next == 0x5C) { 1371 if (next == 0x5C) {
1336 next = _reader.advance(); 1372 next = _reader.advance();
1337 if (next == -1) { 1373 if (next == -1) {
1338 break; 1374 break;
1339 } 1375 }
1340 bool missingCharacter = false;
1341 if (next == 0xD) { 1376 if (next == 0xD) {
1342 missingCharacter = true;
1343 next = _reader.advance(); 1377 next = _reader.advance();
1344 if (next == 0xA) { 1378 if (next == 0xA) {
1345 next = _reader.advance(); 1379 next = _reader.advance();
1346 } 1380 }
1347 recordStartOfLine(); 1381 recordStartOfLine();
1348 } else if (next == 0xA) { 1382 } else if (next == 0xA) {
1349 missingCharacter = true;
1350 recordStartOfLine(); 1383 recordStartOfLine();
1351 next = _reader.advance(); 1384 next = _reader.advance();
1352 } else { 1385 } else {
1353 next = _reader.advance(); 1386 next = _reader.advance();
1354 } 1387 }
1355 if (missingCharacter) {
1356 _errorListener.onError(new AnalysisError.con2(source, _reader.offset - 1, 1, ScannerErrorCode.CHARACTER_EXPECTED_AFTER_SLASH, []));
1357 }
1358 } else if (next == 0xD) { 1388 } else if (next == 0xD) {
1359 next = _reader.advance(); 1389 next = _reader.advance();
1360 if (next == 0xA) { 1390 if (next == 0xA) {
1361 next = _reader.advance(); 1391 next = _reader.advance();
1362 } 1392 }
1363 recordStartOfLine(); 1393 recordStartOfLine();
1364 } else if (next == 0xA) { 1394 } else if (next == 0xA) {
1365 recordStartOfLine(); 1395 recordStartOfLine();
1366 next = _reader.advance(); 1396 next = _reader.advance();
1367 } else { 1397 } else {
(...skipping 365 matching lines...) Expand 10 before | Expand all | Expand 10 after
1733 */ 1763 */
/**
 * Return a copy of the given linked list of (comment) tokens, or `null` if
 * the given token is `null`.
 *
 * @param token the head of the list to be copied
 */
Token copyComments(Token token) {
  if (token == null) {
    return null;
  }
  Token head = token.copy();
  Token tail = head;
  for (Token current = token.next; current != null; current = current.next) {
    tail = tail.setNext(current.copy());
  }
  return head;
}
1746 } 1777 }
1747 /** 1778 /**
1748 * The interface `CharacterReader` 1779 * The interface `CharacterReader`
1749 */ 1780 */
1750 abstract class CharacterReader { 1781 abstract class CharacterReader {
1751 1782
1752 /** 1783 /**
(...skipping 532 matching lines...) Expand 10 before | Expand all | Expand 10 after
/**
 * Return `true` if this token type represents an operator that can be defined
 * by users.
 *
 * @return `true` if this token type represents an operator that can be
 *         defined by users
 */
bool get isUserDefinableOperator {
  // Use string equality (==) rather than identical(): identical() only works
  // when both lexemes are canonicalized compile-time constants, which is a
  // fragile assumption for a value comparison.
  return lexeme == "==" || lexeme == "~" || lexeme == "[]" || lexeme == "[]=" ||
      lexeme == "*" || lexeme == "/" || lexeme == "%" || lexeme == "~/" ||
      lexeme == "+" || lexeme == "-" || lexeme == "<<" || lexeme == ">>" ||
      lexeme == ">=" || lexeme == ">" || lexeme == "<=" || lexeme == "<" ||
      lexeme == "&" || lexeme == "^" || lexeme == "|";
}
2290 } 2321 }
/**
 * A token type representing the end of the token stream, rendered as the
 * string "-eof-".
 */
class TokenType_EOF extends TokenType {
  TokenType_EOF(String name, int ordinal, TokenClass arg0, String arg1) : super.con2(name, ordinal, arg0, arg1);

  String toString() => "-eof-";
}
OLDNEW
« no previous file with comments | « dart/pkg/analyzer/lib/src/generated/resolver.dart ('k') | dart/pkg/analyzer/pubspec.yaml » ('j') | no next file with comments »

Powered by Google App Engine
This is Rietveld 408576698