Chromium Code Reviews
chromiumcodereview-hr@appspot.gserviceaccount.com (chromiumcodereview-hr) | Please choose your nickname with Settings | Help | Chromium Project | Gerrit Changes | Sign out
(65)

Side by Side Diff: pkg/analyzer-experimental/lib/src/generated/scanner.dart

Issue 12253009: Fresh drop of analyzer-experimental. (Closed) Base URL: http://dart.googlecode.com/svn/branches/bleeding_edge/dart/
Patch Set: Created 7 years, 10 months ago
Use n/p to move between diff chunks; N/P to move between comments. Draft comments are only viewable by you.
Jump to:
View unified diff | Download patch | Annotate | Revision Log
OLDNEW
1 // This code was auto-generated, is not intended to be edited, and is subject to 1 // This code was auto-generated, is not intended to be edited, and is subject to
2 // significant change. Please see the README file for more information. 2 // significant change. Please see the README file for more information.
3 3
4 library engine.scanner; 4 library engine.scanner;
5 5
6 import 'dart:collection'; 6 import 'dart:collection';
7 import 'java_core.dart'; 7 import 'java_core.dart';
8 import 'source.dart'; 8 import 'source.dart';
9 import 'error.dart'; 9 import 'error.dart';
10 import 'instrumentation.dart'; 10 import 'instrumentation.dart';
(...skipping 74 matching lines...) Expand 10 before | Expand all | Expand 10 after
85 result[keyword._syntax] = keyword; 85 result[keyword._syntax] = keyword;
86 } 86 }
87 return result; 87 return result;
88 } 88 }
89 /** 89 /**
90 * Initialize a newly created keyword to have the given syntax. The keyword is not a 90 * Initialize a newly created keyword to have the given syntax. The keyword is not a
91 * pseudo-keyword. 91 * pseudo-keyword.
92 * @param syntax the lexeme for the keyword 92 * @param syntax the lexeme for the keyword
93 */ 93 */
94 Keyword.con1(String ___name, int ___ordinal, String syntax) { 94 Keyword.con1(String ___name, int ___ordinal, String syntax) {
95 _jtd_constructor_215_impl(___name, ___ordinal, syntax); 95 _jtd_constructor_224_impl(___name, ___ordinal, syntax);
96 } 96 }
97 _jtd_constructor_215_impl(String ___name, int ___ordinal, String syntax) { 97 _jtd_constructor_224_impl(String ___name, int ___ordinal, String syntax) {
98 _jtd_constructor_216_impl(___name, ___ordinal, syntax, false); 98 _jtd_constructor_225_impl(___name, ___ordinal, syntax, false);
99 } 99 }
100 /** 100 /**
101 * Initialize a newly created keyword to have the given syntax. The keyword is a pseudo-keyword if 101 * Initialize a newly created keyword to have the given syntax. The keyword is a pseudo-keyword if
102 * the given flag is {@code true}. 102 * the given flag is {@code true}.
103 * @param syntax the lexeme for the keyword 103 * @param syntax the lexeme for the keyword
104 * @param isPseudoKeyword {@code true} if this keyword is a pseudo-keyword 104 * @param isPseudoKeyword {@code true} if this keyword is a pseudo-keyword
105 */ 105 */
106 Keyword.con2(String ___name, int ___ordinal, String syntax, bool isPseudoKeywo rd) { 106 Keyword.con2(String ___name, int ___ordinal, String syntax2, bool isPseudoKeyw ord) {
107 _jtd_constructor_216_impl(___name, ___ordinal, syntax, isPseudoKeyword); 107 _jtd_constructor_225_impl(___name, ___ordinal, syntax2, isPseudoKeyword);
108 } 108 }
109 _jtd_constructor_216_impl(String ___name, int ___ordinal, String syntax, bool isPseudoKeyword) { 109 _jtd_constructor_225_impl(String ___name, int ___ordinal, String syntax2, bool isPseudoKeyword) {
110 __name = ___name; 110 __name = ___name;
111 __ordinal = ___ordinal; 111 __ordinal = ___ordinal;
112 this._syntax = syntax; 112 this._syntax = syntax2;
113 this._isPseudoKeyword2 = isPseudoKeyword; 113 this._isPseudoKeyword2 = isPseudoKeyword;
114 } 114 }
115 /** 115 /**
116 * Return the lexeme for the keyword. 116 * Return the lexeme for the keyword.
117 * @return the lexeme for the keyword 117 * @return the lexeme for the keyword
118 */ 118 */
119 String get syntax => _syntax; 119 String get syntax => _syntax;
120 /** 120 /**
121 * Return {@code true} if this keyword is a pseudo-keyword. Pseudo keywords ca n be used as 121 * Return {@code true} if this keyword is a pseudo-keyword. Pseudo keywords ca n be used as
122 * identifiers. 122 * identifiers.
(...skipping 158 matching lines...) Expand 10 before | Expand all | Expand 10 after
281 */ 281 */
282 bool isUserDefinableOperator() => _type.isUserDefinableOperator(); 282 bool isUserDefinableOperator() => _type.isUserDefinableOperator();
283 /** 283 /**
284 * Set the next token in the token stream to the given token. This has the sid e-effect of setting 284 * Set the next token in the token stream to the given token. This has the sid e-effect of setting
285 * this token to be the previous token for the given token. 285 * this token to be the previous token for the given token.
286 * @param token the next token in the token stream 286 * @param token the next token in the token stream
287 * @return the token that was passed in 287 * @return the token that was passed in
288 */ 288 */
289 Token setNext(Token token) { 289 Token setNext(Token token) {
290 _next = token; 290 _next = token;
291 token.previous2 = this; 291 token.previous = this;
292 return token; 292 return token;
293 } 293 }
294 /** 294 /**
295 * Set the next token in the token stream to the given token without changing which token is the 295 * Set the next token in the token stream to the given token without changing which token is the
296 * previous token for the given token. 296 * previous token for the given token.
297 * @param token the next token in the token stream 297 * @param token the next token in the token stream
298 * @return the token that was passed in 298 * @return the token that was passed in
299 */ 299 */
300 Token setNextWithoutSettingPrevious(Token token) { 300 Token setNextWithoutSettingPrevious(Token token) {
301 _next = token; 301 _next = token;
302 return token; 302 return token;
303 } 303 }
304 /** 304 /**
305 * Set the offset from the beginning of the file to the first character in the token to the given 305 * Set the offset from the beginning of the file to the first character in the token to the given
306 * offset. 306 * offset.
307 * @param offset the offset from the beginning of the file to the first charac ter in the token 307 * @param offset the offset from the beginning of the file to the first charac ter in the token
308 */ 308 */
309 void set offset2(int offset) { 309 void set offset(int offset3) {
310 this._offset = offset; 310 this._offset = offset3;
311 } 311 }
312 String toString() => lexeme; 312 String toString() => lexeme;
313 /** 313 /**
314 * Return the value of this token. For keyword tokens, this is the keyword ass ociated with the 314 * Return the value of this token. For keyword tokens, this is the keyword ass ociated with the
315 * token, for other tokens it is the lexeme associated with the token. 315 * token, for other tokens it is the lexeme associated with the token.
316 * @return the value of this token 316 * @return the value of this token
317 */ 317 */
318 Object value() => _type.lexeme; 318 Object value() => _type.lexeme;
319 /** 319 /**
320 * Set the previous token in the token stream to the given token. 320 * Set the previous token in the token stream to the given token.
321 * @param previous the previous token in the token stream 321 * @param previous the previous token in the token stream
322 */ 322 */
323 void set previous2(Token previous) { 323 void set previous(Token previous2) {
324 this._previous = previous; 324 this._previous = previous2;
325 } 325 }
326 } 326 }
327 /** 327 /**
328 * Instances of the class {@code KeywordToken} represent a keyword in the langua ge. 328 * Instances of the class {@code KeywordToken} represent a keyword in the langua ge.
329 */ 329 */
330 class KeywordToken extends Token { 330 class KeywordToken extends Token {
331 /** 331 /**
332 * The keyword being represented by this token. 332 * The keyword being represented by this token.
333 */ 333 */
334 Keyword _keyword; 334 Keyword _keyword;
(...skipping 168 matching lines...) Expand 10 before | Expand all | Expand 10 after
503 /** 503 /**
504 * The class of the token. 504 * The class of the token.
505 */ 505 */
506 TokenClass _tokenClass; 506 TokenClass _tokenClass;
507 /** 507 /**
508 * The lexeme that defines this type of token, or {@code null} if there is mor e than one possible 508 * The lexeme that defines this type of token, or {@code null} if there is mor e than one possible
509 * lexeme for this type of token. 509 * lexeme for this type of token.
510 */ 510 */
511 String _lexeme; 511 String _lexeme;
512 TokenType.con1(String ___name, int ___ordinal) { 512 TokenType.con1(String ___name, int ___ordinal) {
513 _jtd_constructor_227_impl(___name, ___ordinal); 513 _jtd_constructor_236_impl(___name, ___ordinal);
514 } 514 }
515 _jtd_constructor_227_impl(String ___name, int ___ordinal) { 515 _jtd_constructor_236_impl(String ___name, int ___ordinal) {
516 _jtd_constructor_228_impl(___name, ___ordinal, TokenClass.NO_CLASS, null); 516 _jtd_constructor_237_impl(___name, ___ordinal, TokenClass.NO_CLASS, null);
517 } 517 }
518 TokenType.con2(String ___name, int ___ordinal, TokenClass tokenClass, String l exeme) { 518 TokenType.con2(String ___name, int ___ordinal, TokenClass tokenClass2, String lexeme2) {
519 _jtd_constructor_228_impl(___name, ___ordinal, tokenClass, lexeme); 519 _jtd_constructor_237_impl(___name, ___ordinal, tokenClass2, lexeme2);
520 } 520 }
521 _jtd_constructor_228_impl(String ___name, int ___ordinal, TokenClass tokenClas s, String lexeme) { 521 _jtd_constructor_237_impl(String ___name, int ___ordinal, TokenClass tokenClas s2, String lexeme2) {
522 __name = ___name; 522 __name = ___name;
523 __ordinal = ___ordinal; 523 __ordinal = ___ordinal;
524 this._tokenClass = tokenClass == null ? TokenClass.NO_CLASS : tokenClass; 524 this._tokenClass = tokenClass2 == null ? TokenClass.NO_CLASS : tokenClass2;
525 this._lexeme = lexeme; 525 this._lexeme = lexeme2;
526 } 526 }
527 /** 527 /**
528 * Return the lexeme that defines this type of token, or {@code null} if there is more than one 528 * Return the lexeme that defines this type of token, or {@code null} if there is more than one
529 * possible lexeme for this type of token. 529 * possible lexeme for this type of token.
530 * @return the lexeme that defines this type of token 530 * @return the lexeme that defines this type of token
531 */ 531 */
532 String get lexeme => _lexeme; 532 String get lexeme => _lexeme;
533 /** 533 /**
534 * Return the precedence of the token, or {@code 0} if the token does not repr esent an operator. 534 * Return the precedence of the token, or {@code 0} if the token does not repr esent an operator.
535 * @return the precedence of the token 535 * @return the precedence of the token
536 */ 536 */
537 int get precedence => _tokenClass.precedence; 537 int get precedence => _tokenClass.precedence;
538 /** 538 /**
539 * Return {@code true} if this type of token represents an additive operator. 539 * Return {@code true} if this type of token represents an additive operator.
540 * @return {@code true} if this type of token represents an additive operator 540 * @return {@code true} if this type of token represents an additive operator
541 */ 541 */
542 bool isAdditiveOperator() => _tokenClass == TokenClass.ADDITIVE_OPERATOR; 542 bool isAdditiveOperator() => identical(_tokenClass, TokenClass.ADDITIVE_OPERAT OR);
543 /** 543 /**
544 * Return {@code true} if this type of token represents an assignment operator . 544 * Return {@code true} if this type of token represents an assignment operator .
545 * @return {@code true} if this type of token represents an assignment operato r 545 * @return {@code true} if this type of token represents an assignment operato r
546 */ 546 */
547 bool isAssignmentOperator() => _tokenClass == TokenClass.ASSIGNMENT_OPERATOR; 547 bool isAssignmentOperator() => identical(_tokenClass, TokenClass.ASSIGNMENT_OP ERATOR);
548 /**
549 * Return {@code true} if this type of token represents an associative operato r. An associative
550 * operator is an operator for which the following equality is true:{@code (a * b) * c == a * (b * c)}. In other words, if the result of applying the operator to
551 * multiple operands does not depend on the order in which those applications occur.
552 * <p>
553 * Note: This method considers the logical-and and logical-or operators to be associative, even
554 * though the order in which the application of those operators can have an ef fect because
555 * evaluation of the right-hand operand is conditional.
556 * @return {@code true} if this type of token represents an associative operat or
557 */
558 bool isAssociativeOperator() => identical(this, AMPERSAND) || identical(this, AMPERSAND_AMPERSAND) || identical(this, BAR) || identical(this, BAR_BAR) || iden tical(this, CARET) || identical(this, PLUS) || identical(this, STAR);
548 /** 559 /**
549 * Return {@code true} if this type of token represents an equality operator. 560 * Return {@code true} if this type of token represents an equality operator.
550 * @return {@code true} if this type of token represents an equality operator 561 * @return {@code true} if this type of token represents an equality operator
551 */ 562 */
552 bool isEqualityOperator() => _tokenClass == TokenClass.EQUALITY_OPERATOR; 563 bool isEqualityOperator() => identical(_tokenClass, TokenClass.EQUALITY_OPERAT OR);
553 /** 564 /**
554 * Return {@code true} if this type of token represents an increment operator. 565 * Return {@code true} if this type of token represents an increment operator.
555 * @return {@code true} if this type of token represents an increment operator 566 * @return {@code true} if this type of token represents an increment operator
556 */ 567 */
557 bool isIncrementOperator() => _lexeme == "++" || _lexeme == "--"; 568 bool isIncrementOperator() => identical(_lexeme, "++") || identical(_lexeme, " --");
558 /** 569 /**
559 * Return {@code true} if this type of token represents a multiplicative opera tor. 570 * Return {@code true} if this type of token represents a multiplicative opera tor.
560 * @return {@code true} if this type of token represents a multiplicative oper ator 571 * @return {@code true} if this type of token represents a multiplicative oper ator
561 */ 572 */
562 bool isMultiplicativeOperator() => _tokenClass == TokenClass.MULTIPLICATIVE_OP ERATOR; 573 bool isMultiplicativeOperator() => identical(_tokenClass, TokenClass.MULTIPLIC ATIVE_OPERATOR);
563 /** 574 /**
564 * Return {@code true} if this token type represents an operator. 575 * Return {@code true} if this token type represents an operator.
565 * @return {@code true} if this token type represents an operator 576 * @return {@code true} if this token type represents an operator
566 */ 577 */
567 bool isOperator() => _tokenClass != TokenClass.NO_CLASS && this != TokenType.O PEN_PAREN && this != TokenType.OPEN_SQUARE_BRACKET && this != TokenType.PERIOD; 578 bool isOperator() => _tokenClass != TokenClass.NO_CLASS && this != OPEN_PAREN && this != OPEN_SQUARE_BRACKET && this != PERIOD;
568 /** 579 /**
569 * Return {@code true} if this type of token represents a relational operator. 580 * Return {@code true} if this type of token represents a relational operator.
570 * @return {@code true} if this type of token represents a relational operator 581 * @return {@code true} if this type of token represents a relational operator
571 */ 582 */
572 bool isRelationalOperator() => _tokenClass == TokenClass.RELATIONAL_OPERATOR; 583 bool isRelationalOperator() => identical(_tokenClass, TokenClass.RELATIONAL_OP ERATOR);
573 /** 584 /**
574 * Return {@code true} if this type of token represents a shift operator. 585 * Return {@code true} if this type of token represents a shift operator.
575 * @return {@code true} if this type of token represents a shift operator 586 * @return {@code true} if this type of token represents a shift operator
576 */ 587 */
577 bool isShiftOperator() => _tokenClass == TokenClass.SHIFT_OPERATOR; 588 bool isShiftOperator() => identical(_tokenClass, TokenClass.SHIFT_OPERATOR);
578 /** 589 /**
579 * Return {@code true} if this type of token represents a unary postfix operat or. 590 * Return {@code true} if this type of token represents a unary postfix operat or.
580 * @return {@code true} if this type of token represents a unary postfix opera tor 591 * @return {@code true} if this type of token represents a unary postfix opera tor
581 */ 592 */
582 bool isUnaryPostfixOperator() => _tokenClass == TokenClass.UNARY_POSTFIX_OPERA TOR; 593 bool isUnaryPostfixOperator() => identical(_tokenClass, TokenClass.UNARY_POSTF IX_OPERATOR);
583 /** 594 /**
584 * Return {@code true} if this type of token represents a unary prefix operato r. 595 * Return {@code true} if this type of token represents a unary prefix operato r.
585 * @return {@code true} if this type of token represents a unary prefix operat or 596 * @return {@code true} if this type of token represents a unary prefix operat or
586 */ 597 */
587 bool isUnaryPrefixOperator() => _tokenClass == TokenClass.UNARY_PREFIX_OPERATO R; 598 bool isUnaryPrefixOperator() => identical(_tokenClass, TokenClass.UNARY_PREFIX _OPERATOR);
588 /** 599 /**
589 * Return {@code true} if this token type represents an operator that can be d efined by users. 600 * Return {@code true} if this token type represents an operator that can be d efined by users.
590 * @return {@code true} if this token type represents an operator that can be defined by users 601 * @return {@code true} if this token type represents an operator that can be defined by users
591 */ 602 */
592 bool isUserDefinableOperator() => _lexeme == "==" || _lexeme == "~" || _lexeme == "[]" || _lexeme == "[]=" || _lexeme == "*" || _lexeme == "/" || _lexeme == " %" || _lexeme == "~/" || _lexeme == "+" || _lexeme == "-" || _lexeme == "<<" || _lexeme == ">>" || _lexeme == ">=" || _lexeme == ">" || _lexeme == "<=" || _lexe me == "<" || _lexeme == "&" || _lexeme == "^" || _lexeme == "|"; 603 bool isUserDefinableOperator() => identical(_lexeme, "==") || identical(_lexem e, "~") || identical(_lexeme, "[]") || identical(_lexeme, "[]=") || identical(_l exeme, "*") || identical(_lexeme, "/") || identical(_lexeme, "%") || identical(_ lexeme, "~/") || identical(_lexeme, "+") || identical(_lexeme, "-") || identical (_lexeme, "<<") || identical(_lexeme, ">>") || identical(_lexeme, ">=") || ident ical(_lexeme, ">") || identical(_lexeme, "<=") || identical(_lexeme, "<") || ide ntical(_lexeme, "&") || identical(_lexeme, "^") || identical(_lexeme, "|");
593 String toString() => __name; 604 String toString() => __name;
594 } 605 }
595 class TokenType_EOF extends TokenType { 606 class TokenType_EOF extends TokenType {
596 TokenType_EOF(String ___name, int ___ordinal, TokenClass arg0, String arg1) : super.con2(___name, ___ordinal, arg0, arg1); 607 TokenType_EOF(String ___name, int ___ordinal, TokenClass arg0, String arg1) : super.con2(___name, ___ordinal, arg0, arg1);
597 String toString() => "-eof-"; 608 String toString() => "-eof-";
598 } 609 }
599 /** 610 /**
600 * Instances of the class {@code TokenWithComment} represent a string token that is preceded by 611 * Instances of the class {@code TokenWithComment} represent a string token that is preceded by
601 * comments. 612 * comments.
602 */ 613 */
(...skipping 22 matching lines...) Expand all
625 /** 636 /**
626 * The token that corresponds to this token. 637 * The token that corresponds to this token.
627 */ 638 */
628 Token _endToken; 639 Token _endToken;
629 /** 640 /**
630 * Initialize a newly created token representing the opening half of a groupin g pair of tokens. 641 * Initialize a newly created token representing the opening half of a groupin g pair of tokens.
631 * @param type the type of the token 642 * @param type the type of the token
632 * @param offset the offset from the beginning of the file to the first charac ter in the token 643 * @param offset the offset from the beginning of the file to the first charac ter in the token
633 */ 644 */
634 BeginToken(TokenType type, int offset) : super(type, offset) { 645 BeginToken(TokenType type, int offset) : super(type, offset) {
635 assert((type == TokenType.OPEN_CURLY_BRACKET || type == TokenType.OPEN_PAREN || type == TokenType.OPEN_SQUARE_BRACKET || type == TokenType.STRING_INTERPOLAT ION_EXPRESSION)); 646 assert((identical(type, TokenType.OPEN_CURLY_BRACKET) || identical(type, Tok enType.OPEN_PAREN) || identical(type, TokenType.OPEN_SQUARE_BRACKET) || identica l(type, TokenType.STRING_INTERPOLATION_EXPRESSION)));
636 } 647 }
637 /** 648 /**
638 * Return the token that corresponds to this token. 649 * Return the token that corresponds to this token.
639 * @return the token that corresponds to this token 650 * @return the token that corresponds to this token
640 */ 651 */
641 Token get endToken => _endToken; 652 Token get endToken => _endToken;
642 /** 653 /**
643 * Set the token that corresponds to this token to the given token. 654 * Set the token that corresponds to this token to the given token.
644 * @param token the token that corresponds to this token 655 * @param token the token that corresponds to this token
645 */ 656 */
646 void set endToken2(Token token) { 657 void set endToken(Token token) {
647 this._endToken = token; 658 this._endToken = token;
648 } 659 }
649 } 660 }
650 /** 661 /**
651 * The enumeration {@code TokenClass} represents classes (or groups) of tokens w ith a similar use. 662 * The enumeration {@code TokenClass} represents classes (or groups) of tokens w ith a similar use.
652 */ 663 */
653 class TokenClass { 664 class TokenClass {
654 /** 665 /**
655 * A value used to indicate that the token type is not part of any specific cl ass of token. 666 * A value used to indicate that the token type is not part of any specific cl ass of token.
656 */ 667 */
(...skipping 60 matching lines...) Expand 10 before | Expand all | Expand 10 after
717 static final TokenClass UNARY_PREFIX_OPERATOR = new TokenClass.con2('UNARY_PRE FIX_OPERATOR', 15, 14); 728 static final TokenClass UNARY_PREFIX_OPERATOR = new TokenClass.con2('UNARY_PRE FIX_OPERATOR', 15, 14);
718 static final List<TokenClass> values = [NO_CLASS, ADDITIVE_OPERATOR, ASSIGNMEN T_OPERATOR, BITWISE_AND_OPERATOR, BITWISE_OR_OPERATOR, BITWISE_XOR_OPERATOR, CAS CADE_OPERATOR, CONDITIONAL_OPERATOR, EQUALITY_OPERATOR, LOGICAL_AND_OPERATOR, LO GICAL_OR_OPERATOR, MULTIPLICATIVE_OPERATOR, RELATIONAL_OPERATOR, SHIFT_OPERATOR, UNARY_POSTFIX_OPERATOR, UNARY_PREFIX_OPERATOR]; 729 static final List<TokenClass> values = [NO_CLASS, ADDITIVE_OPERATOR, ASSIGNMEN T_OPERATOR, BITWISE_AND_OPERATOR, BITWISE_OR_OPERATOR, BITWISE_XOR_OPERATOR, CAS CADE_OPERATOR, CONDITIONAL_OPERATOR, EQUALITY_OPERATOR, LOGICAL_AND_OPERATOR, LO GICAL_OR_OPERATOR, MULTIPLICATIVE_OPERATOR, RELATIONAL_OPERATOR, SHIFT_OPERATOR, UNARY_POSTFIX_OPERATOR, UNARY_PREFIX_OPERATOR];
719 String __name; 730 String __name;
720 int __ordinal = 0; 731 int __ordinal = 0;
721 /** 732 /**
722 * The precedence of tokens of this class, or {@code 0} if the such tokens do not represent an 733 * The precedence of tokens of this class, or {@code 0} if the such tokens do not represent an
723 * operator. 734 * operator.
724 */ 735 */
725 int _precedence = 0; 736 int _precedence = 0;
726 TokenClass.con1(String ___name, int ___ordinal) { 737 TokenClass.con1(String ___name, int ___ordinal) {
727 _jtd_constructor_225_impl(___name, ___ordinal); 738 _jtd_constructor_234_impl(___name, ___ordinal);
728 } 739 }
729 _jtd_constructor_225_impl(String ___name, int ___ordinal) { 740 _jtd_constructor_234_impl(String ___name, int ___ordinal) {
730 _jtd_constructor_226_impl(___name, ___ordinal, 0); 741 _jtd_constructor_235_impl(___name, ___ordinal, 0);
731 } 742 }
732 TokenClass.con2(String ___name, int ___ordinal, int precedence) { 743 TokenClass.con2(String ___name, int ___ordinal, int precedence2) {
733 _jtd_constructor_226_impl(___name, ___ordinal, precedence); 744 _jtd_constructor_235_impl(___name, ___ordinal, precedence2);
734 } 745 }
735 _jtd_constructor_226_impl(String ___name, int ___ordinal, int precedence) { 746 _jtd_constructor_235_impl(String ___name, int ___ordinal, int precedence2) {
736 __name = ___name; 747 __name = ___name;
737 __ordinal = ___ordinal; 748 __ordinal = ___ordinal;
738 this._precedence = precedence; 749 this._precedence = precedence2;
739 } 750 }
740 /** 751 /**
741 * Return the precedence of tokens of this class, or {@code 0} if the such tok ens do not represent 752 * Return the precedence of tokens of this class, or {@code 0} if the such tok ens do not represent
742 * an operator. 753 * an operator.
743 * @return the precedence of tokens of this class 754 * @return the precedence of tokens of this class
744 */ 755 */
745 int get precedence => _precedence; 756 int get precedence => _precedence;
746 String toString() => __name; 757 String toString() => __name;
747 } 758 }
748 /** 759 /**
(...skipping 51 matching lines...) Expand 10 before | Expand all | Expand 10 after
800 recordStartOfLine(); 811 recordStartOfLine();
801 } 812 }
802 _offsetDelta = offset - column + 1; 813 _offsetDelta = offset - column + 1;
803 recordStartOfLine(); 814 recordStartOfLine();
804 _offsetDelta = offset; 815 _offsetDelta = offset;
805 } 816 }
806 int advance() { 817 int advance() {
807 if (_charOffset + 1 >= _stringLength) { 818 if (_charOffset + 1 >= _stringLength) {
808 return -1; 819 return -1;
809 } 820 }
810 return _string.charCodeAt(++_charOffset); 821 return _string.codeUnitAt(++_charOffset);
811 } 822 }
812 String getString(int start, int endDelta) => _string.substring(start - _offset Delta, _charOffset + 1 + endDelta); 823 String getString(int start, int endDelta) => _string.substring(start - _offset Delta, _charOffset + 1 + endDelta);
813 int peek() { 824 int peek() {
814 if (_charOffset + 1 >= _string.length) { 825 if (_charOffset + 1 >= _string.length) {
815 return -1; 826 return -1;
816 } 827 }
817 return _string.charCodeAt(_charOffset + 1); 828 return _string.codeUnitAt(_charOffset + 1);
818 } 829 }
819 } 830 }
820 /** 831 /**
821 * The abstract class {@code AbstractScanner} implements a scanner for Dart code . Subclasses are 832 * The abstract class {@code AbstractScanner} implements a scanner for Dart code . Subclasses are
822 * required to implement the interface used to access the characters being scann ed. 833 * required to implement the interface used to access the characters being scann ed.
823 * <p> 834 * <p>
824 * The lexical structure of Dart is ambiguous without knowledge of the context i n which a token is 835 * The lexical structure of Dart is ambiguous without knowledge of the context i n which a token is
825 * being scanned. For example, without context we cannot determine whether sourc e of the form "<<" 836 * being scanned. For example, without context we cannot determine whether sourc e of the form "<<"
826 * should be scanned as a single left-shift operator or as two left angle bracke ts. This scanner 837 * should be scanned as a single left-shift operator or as two left angle bracke ts. This scanner
827 * does not have any context, so it always resolves such conflicts by scanning t he longest possible 838 * does not have any context, so it always resolves such conflicts by scanning t he longest possible
(...skipping 130 matching lines...) Expand 10 before | Expand all | Expand 10 after
958 _groupingStack.add(token); 969 _groupingStack.add(token);
959 } 970 }
960 void appendCommentToken(TokenType type, String value) { 971 void appendCommentToken(TokenType type, String value) {
961 if (_firstComment == null) { 972 if (_firstComment == null) {
962 _firstComment = new StringToken(type, value, _tokenStart); 973 _firstComment = new StringToken(type, value, _tokenStart);
963 _lastComment = _firstComment; 974 _lastComment = _firstComment;
964 } else { 975 } else {
965 _lastComment = _lastComment.setNext(new StringToken(type, value, _tokenSta rt)); 976 _lastComment = _lastComment.setNext(new StringToken(type, value, _tokenSta rt));
966 } 977 }
967 } 978 }
968 void appendEndToken(TokenType type, TokenType beginType) { 979 void appendEndToken(TokenType type26, TokenType beginType) {
969 Token token; 980 Token token;
970 if (_firstComment == null) { 981 if (_firstComment == null) {
971 token = new Token(type, _tokenStart); 982 token = new Token(type26, _tokenStart);
972 } else { 983 } else {
973 token = new TokenWithComment(type, _tokenStart, _firstComment); 984 token = new TokenWithComment(type26, _tokenStart, _firstComment);
974 _firstComment = null; 985 _firstComment = null;
975 _lastComment = null; 986 _lastComment = null;
976 } 987 }
977 _tail = _tail.setNext(token); 988 _tail = _tail.setNext(token);
978 int last = _groupingStack.length - 1; 989 int last = _groupingStack.length - 1;
979 if (last >= 0) { 990 if (last >= 0) {
980 BeginToken begin = _groupingStack[last]; 991 BeginToken begin = _groupingStack[last];
981 if (begin.type == beginType) { 992 if (identical(begin.type, beginType)) {
982 begin.endToken2 = token; 993 begin.endToken = token;
983 _groupingStack.removeAt(last); 994 _groupingStack.removeAt(last);
984 } 995 }
985 } 996 }
986 } 997 }
987 void appendEofToken() { 998 void appendEofToken() {
988 Token eofToken; 999 Token eofToken;
989 if (_firstComment == null) { 1000 if (_firstComment == null) {
990 eofToken = new Token(TokenType.EOF, offset + 1); 1001 eofToken = new Token(TokenType.EOF, offset + 1);
991 } else { 1002 } else {
992 eofToken = new TokenWithComment(TokenType.EOF, offset + 1, _firstComment); 1003 eofToken = new TokenWithComment(TokenType.EOF, offset + 1, _firstComment);
(...skipping 49 matching lines...) Expand 10 before | Expand all | Expand 10 after
1042 _tail = _tail.setNext(new TokenWithComment(type, offset, _firstComment)); 1053 _tail = _tail.setNext(new TokenWithComment(type, offset, _firstComment));
1043 _firstComment = null; 1054 _firstComment = null;
1044 _lastComment = null; 1055 _lastComment = null;
1045 } 1056 }
1046 } 1057 }
1047 void beginToken() { 1058 void beginToken() {
1048 _tokenStart = offset; 1059 _tokenStart = offset;
1049 } 1060 }
1050 int bigSwitch(int next) { 1061 int bigSwitch(int next) {
1051 beginToken(); 1062 beginToken();
1052 if (next == 0xd) { 1063 if (next == 0xD) {
1053 next = advance(); 1064 next = advance();
1054 if (next == 0xa) { 1065 if (next == 0xA) {
1055 next = advance(); 1066 next = advance();
1056 } 1067 }
1057 recordStartOfLine(); 1068 recordStartOfLine();
1058 return next; 1069 return next;
1059 } else if (next == 0xa) { 1070 } else if (next == 0xA) {
1060 recordStartOfLine(); 1071 recordStartOfLine();
1061 return advance(); 1072 return advance();
1062 } else if (next == 0x9 || next == 0x20) { 1073 } else if (next == 0x9 || next == 0x20) {
1063 return advance(); 1074 return advance();
1064 } 1075 }
1065 if (next == 0x72) { 1076 if (next == 0x72) {
1066 int peek3 = peek(); 1077 int peek3 = peek();
1067 if (peek3 == 0x22 || peek3 == 0x27) { 1078 if (peek3 == 0x22 || peek3 == 0x27) {
1068 int start = offset; 1079 int start = offset;
1069 return tokenizeString(advance(), start, true); 1080 return tokenizeString(advance(), start, true);
1070 } 1081 }
1071 } 1082 }
1072 if (0x61 <= next && next <= 0x7a) { 1083 if (0x61 <= next && next <= 0x7A) {
1073 return tokenizeKeywordOrIdentifier(next, true); 1084 return tokenizeKeywordOrIdentifier(next, true);
1074 } 1085 }
1075 if ((0x41 <= next && next <= 0x5a) || next == 0x5f || next == 0x24) { 1086 if ((0x41 <= next && next <= 0x5A) || next == 0x5F || next == 0x24) {
1076 return tokenizeIdentifier(next, offset, true); 1087 return tokenizeIdentifier(next, offset, true);
1077 } 1088 }
1078 if (next == 0x3c) { 1089 if (next == 0x3C) {
1079 return tokenizeLessThan(next); 1090 return tokenizeLessThan(next);
1080 } 1091 }
1081 if (next == 0x3e) { 1092 if (next == 0x3E) {
1082 return tokenizeGreaterThan(next); 1093 return tokenizeGreaterThan(next);
1083 } 1094 }
1084 if (next == 0x3d) { 1095 if (next == 0x3D) {
1085 return tokenizeEquals(next); 1096 return tokenizeEquals(next);
1086 } 1097 }
1087 if (next == 0x21) { 1098 if (next == 0x21) {
1088 return tokenizeExclamation(next); 1099 return tokenizeExclamation(next);
1089 } 1100 }
1090 if (next == 0x2b) { 1101 if (next == 0x2B) {
1091 return tokenizePlus(next); 1102 return tokenizePlus(next);
1092 } 1103 }
1093 if (next == 0x2d) { 1104 if (next == 0x2D) {
1094 return tokenizeMinus(next); 1105 return tokenizeMinus(next);
1095 } 1106 }
1096 if (next == 0x2a) { 1107 if (next == 0x2A) {
1097 return tokenizeMultiply(next); 1108 return tokenizeMultiply(next);
1098 } 1109 }
1099 if (next == 0x25) { 1110 if (next == 0x25) {
1100 return tokenizePercent(next); 1111 return tokenizePercent(next);
1101 } 1112 }
1102 if (next == 0x26) { 1113 if (next == 0x26) {
1103 return tokenizeAmpersand(next); 1114 return tokenizeAmpersand(next);
1104 } 1115 }
1105 if (next == 0x7c) { 1116 if (next == 0x7C) {
1106 return tokenizeBar(next); 1117 return tokenizeBar(next);
1107 } 1118 }
1108 if (next == 0x5e) { 1119 if (next == 0x5E) {
1109 return tokenizeCaret(next); 1120 return tokenizeCaret(next);
1110 } 1121 }
1111 if (next == 0x5b) { 1122 if (next == 0x5B) {
1112 return tokenizeOpenSquareBracket(next); 1123 return tokenizeOpenSquareBracket(next);
1113 } 1124 }
1114 if (next == 0x7e) { 1125 if (next == 0x7E) {
1115 return tokenizeTilde(next); 1126 return tokenizeTilde(next);
1116 } 1127 }
1117 if (next == 0x5c) { 1128 if (next == 0x5C) {
1118 appendToken(TokenType.BACKSLASH); 1129 appendToken(TokenType.BACKSLASH);
1119 return advance(); 1130 return advance();
1120 } 1131 }
1121 if (next == 0x23) { 1132 if (next == 0x23) {
1122 return tokenizeTag(next); 1133 return tokenizeTag(next);
1123 } 1134 }
1124 if (next == 0x28) { 1135 if (next == 0x28) {
1125 appendBeginToken(TokenType.OPEN_PAREN); 1136 appendBeginToken(TokenType.OPEN_PAREN);
1126 return advance(); 1137 return advance();
1127 } 1138 }
1128 if (next == 0x29) { 1139 if (next == 0x29) {
1129 appendEndToken(TokenType.CLOSE_PAREN, TokenType.OPEN_PAREN); 1140 appendEndToken(TokenType.CLOSE_PAREN, TokenType.OPEN_PAREN);
1130 return advance(); 1141 return advance();
1131 } 1142 }
1132 if (next == 0x2c) { 1143 if (next == 0x2C) {
1133 appendToken(TokenType.COMMA); 1144 appendToken(TokenType.COMMA);
1134 return advance(); 1145 return advance();
1135 } 1146 }
1136 if (next == 0x3a) { 1147 if (next == 0x3A) {
1137 appendToken(TokenType.COLON); 1148 appendToken(TokenType.COLON);
1138 return advance(); 1149 return advance();
1139 } 1150 }
1140 if (next == 0x3b) { 1151 if (next == 0x3B) {
1141 appendToken(TokenType.SEMICOLON); 1152 appendToken(TokenType.SEMICOLON);
1142 return advance(); 1153 return advance();
1143 } 1154 }
1144 if (next == 0x3f) { 1155 if (next == 0x3F) {
1145 appendToken(TokenType.QUESTION); 1156 appendToken(TokenType.QUESTION);
1146 return advance(); 1157 return advance();
1147 } 1158 }
1148 if (next == 0x5d) { 1159 if (next == 0x5D) {
1149 appendEndToken(TokenType.CLOSE_SQUARE_BRACKET, TokenType.OPEN_SQUARE_BRACK ET); 1160 appendEndToken(TokenType.CLOSE_SQUARE_BRACKET, TokenType.OPEN_SQUARE_BRACK ET);
1150 return advance(); 1161 return advance();
1151 } 1162 }
1152 if (next == 0x60) { 1163 if (next == 0x60) {
1153 appendToken(TokenType.BACKPING); 1164 appendToken(TokenType.BACKPING);
1154 return advance(); 1165 return advance();
1155 } 1166 }
1156 if (next == 0x7b) { 1167 if (next == 0x7B) {
1157 appendBeginToken(TokenType.OPEN_CURLY_BRACKET); 1168 appendBeginToken(TokenType.OPEN_CURLY_BRACKET);
1158 return advance(); 1169 return advance();
1159 } 1170 }
1160 if (next == 0x7d) { 1171 if (next == 0x7D) {
1161 appendEndToken(TokenType.CLOSE_CURLY_BRACKET, TokenType.OPEN_CURLY_BRACKET ); 1172 appendEndToken(TokenType.CLOSE_CURLY_BRACKET, TokenType.OPEN_CURLY_BRACKET );
1162 return advance(); 1173 return advance();
1163 } 1174 }
1164 if (next == 0x2f) { 1175 if (next == 0x2F) {
1165 return tokenizeSlashOrComment(next); 1176 return tokenizeSlashOrComment(next);
1166 } 1177 }
1167 if (next == 0x40) { 1178 if (next == 0x40) {
1168 appendToken(TokenType.AT); 1179 appendToken(TokenType.AT);
1169 return advance(); 1180 return advance();
1170 } 1181 }
1171 if (next == 0x22 || next == 0x27) { 1182 if (next == 0x22 || next == 0x27) {
1172 return tokenizeString(next, offset, false); 1183 return tokenizeString(next, offset, false);
1173 } 1184 }
1174 if (next == 0x2e) { 1185 if (next == 0x2E) {
1175 return tokenizeDotOrNumber(next); 1186 return tokenizeDotOrNumber(next);
1176 } 1187 }
1177 if (next == 0x30) { 1188 if (next == 0x30) {
1178 return tokenizeHexOrNumber(next); 1189 return tokenizeHexOrNumber(next);
1179 } 1190 }
1180 if (0x31 <= next && next <= 0x39) { 1191 if (0x31 <= next && next <= 0x39) {
1181 return tokenizeNumber(next); 1192 return tokenizeNumber(next);
1182 } 1193 }
1183 if (next == -1) { 1194 if (next == -1) {
1184 return -1; 1195 return -1;
1185 } 1196 }
1186 if (Character.isLetter(next)) { 1197 if (Character.isLetter(next)) {
1187 return tokenizeIdentifier(next, offset, true); 1198 return tokenizeIdentifier(next, offset, true);
1188 } 1199 }
1189 if (next == AbstractScanner._$NBSP) { 1200 if (next == _$NBSP) {
1190 return advance(); 1201 return advance();
1191 } 1202 }
1192 reportError(ScannerErrorCode.ILLEGAL_CHARACTER, [next]); 1203 reportError(ScannerErrorCode.ILLEGAL_CHARACTER, [next]);
1193 return advance(); 1204 return advance();
1194 } 1205 }
1195 /** 1206 /**
1196 * Return the beginning token corresponding to a closing brace that was found while scanning 1207 * Return the beginning token corresponding to a closing brace that was found while scanning
1197 * inside a string interpolation expression. Tokens that cannot be matched wit h the closing brace 1208 * inside a string interpolation expression. Tokens that cannot be matched wit h the closing brace
1198 * will be dropped from the stack. 1209 * will be dropped from the stack.
1199 * @return the token to be paired with the closing brace 1210 * @return the token to be paired with the closing brace
1200 */ 1211 */
1201 BeginToken findTokenMatchingClosingBraceInInterpolationExpression() { 1212 BeginToken findTokenMatchingClosingBraceInInterpolationExpression() {
1202 int last = _groupingStack.length - 1; 1213 int last = _groupingStack.length - 1;
1203 while (last >= 0) { 1214 while (last >= 0) {
1204 BeginToken begin = _groupingStack[last]; 1215 BeginToken begin = _groupingStack[last];
1205 if (begin.type == TokenType.OPEN_CURLY_BRACKET || begin.type == TokenType. STRING_INTERPOLATION_EXPRESSION) { 1216 if (identical(begin.type, TokenType.OPEN_CURLY_BRACKET) || identical(begin .type, TokenType.STRING_INTERPOLATION_EXPRESSION)) {
1206 return begin; 1217 return begin;
1207 } 1218 }
1208 _hasUnmatchedGroups2 = true; 1219 _hasUnmatchedGroups2 = true;
1209 _groupingStack.removeAt(last); 1220 _groupingStack.removeAt(last);
1210 last--; 1221 last--;
1211 } 1222 }
1212 return null; 1223 return null;
1213 } 1224 }
1214 Token firstToken() => _tokens.next; 1225 Token firstToken() => _tokens.next;
1215 /** 1226 /**
(...skipping 27 matching lines...) Expand all
1243 } else { 1254 } else {
1244 appendToken2(noType, offset); 1255 appendToken2(noType, offset);
1245 return next; 1256 return next;
1246 } 1257 }
1247 } 1258 }
1248 int tokenizeAmpersand(int next) { 1259 int tokenizeAmpersand(int next) {
1249 next = advance(); 1260 next = advance();
1250 if (next == 0x26) { 1261 if (next == 0x26) {
1251 appendToken(TokenType.AMPERSAND_AMPERSAND); 1262 appendToken(TokenType.AMPERSAND_AMPERSAND);
1252 return advance(); 1263 return advance();
1253 } else if (next == 0x3d) { 1264 } else if (next == 0x3D) {
1254 appendToken(TokenType.AMPERSAND_EQ); 1265 appendToken(TokenType.AMPERSAND_EQ);
1255 return advance(); 1266 return advance();
1256 } else { 1267 } else {
1257 appendToken(TokenType.AMPERSAND); 1268 appendToken(TokenType.AMPERSAND);
1258 return next; 1269 return next;
1259 } 1270 }
1260 } 1271 }
1261 int tokenizeBar(int next) { 1272 int tokenizeBar(int next) {
1262 next = advance(); 1273 next = advance();
1263 if (next == 0x7c) { 1274 if (next == 0x7C) {
1264 appendToken(TokenType.BAR_BAR); 1275 appendToken(TokenType.BAR_BAR);
1265 return advance(); 1276 return advance();
1266 } else if (next == 0x3d) { 1277 } else if (next == 0x3D) {
1267 appendToken(TokenType.BAR_EQ); 1278 appendToken(TokenType.BAR_EQ);
1268 return advance(); 1279 return advance();
1269 } else { 1280 } else {
1270 appendToken(TokenType.BAR); 1281 appendToken(TokenType.BAR);
1271 return next; 1282 return next;
1272 } 1283 }
1273 } 1284 }
1274 int tokenizeCaret(int next) => select(0x3d, TokenType.CARET_EQ, TokenType.CARE T); 1285 int tokenizeCaret(int next) => select(0x3D, TokenType.CARET_EQ, TokenType.CARE T);
1275 int tokenizeDotOrNumber(int next) { 1286 int tokenizeDotOrNumber(int next) {
1276 int start = offset; 1287 int start = offset;
1277 next = advance(); 1288 next = advance();
1278 if ((0x30 <= next && next <= 0x39)) { 1289 if ((0x30 <= next && next <= 0x39)) {
1279 return tokenizeFractionPart(next, start); 1290 return tokenizeFractionPart(next, start);
1280 } else if (0x2e == next) { 1291 } else if (0x2E == next) {
1281 return select(0x2e, TokenType.PERIOD_PERIOD_PERIOD, TokenType.PERIOD_PERIO D); 1292 return select(0x2E, TokenType.PERIOD_PERIOD_PERIOD, TokenType.PERIOD_PERIO D);
1282 } else { 1293 } else {
1283 appendToken(TokenType.PERIOD); 1294 appendToken(TokenType.PERIOD);
1284 return next; 1295 return next;
1285 } 1296 }
1286 } 1297 }
1287 int tokenizeEquals(int next) { 1298 int tokenizeEquals(int next) {
1288 next = advance(); 1299 next = advance();
1289 if (next == 0x3d) { 1300 if (next == 0x3D) {
1290 appendToken(TokenType.EQ_EQ); 1301 appendToken(TokenType.EQ_EQ);
1291 return advance(); 1302 return advance();
1292 } else if (next == 0x3e) { 1303 } else if (next == 0x3E) {
1293 appendToken(TokenType.FUNCTION); 1304 appendToken(TokenType.FUNCTION);
1294 return advance(); 1305 return advance();
1295 } 1306 }
1296 appendToken(TokenType.EQ); 1307 appendToken(TokenType.EQ);
1297 return next; 1308 return next;
1298 } 1309 }
1299 int tokenizeExclamation(int next) { 1310 int tokenizeExclamation(int next) {
1300 next = advance(); 1311 next = advance();
1301 if (next == 0x3d) { 1312 if (next == 0x3D) {
1302 appendToken(TokenType.BANG_EQ); 1313 appendToken(TokenType.BANG_EQ);
1303 return advance(); 1314 return advance();
1304 } 1315 }
1305 appendToken(TokenType.BANG); 1316 appendToken(TokenType.BANG);
1306 return next; 1317 return next;
1307 } 1318 }
1308 int tokenizeExponent(int next) { 1319 int tokenizeExponent(int next) {
1309 if (next == 0x2b || next == 0x2d) { 1320 if (next == 0x2B || next == 0x2D) {
1310 next = advance(); 1321 next = advance();
1311 } 1322 }
1312 bool hasDigits = false; 1323 bool hasDigits = false;
1313 while (true) { 1324 while (true) {
1314 if (0x30 <= next && next <= 0x39) { 1325 if (0x30 <= next && next <= 0x39) {
1315 hasDigits = true; 1326 hasDigits = true;
1316 } else { 1327 } else {
1317 if (!hasDigits) { 1328 if (!hasDigits) {
1318 reportError(ScannerErrorCode.MISSING_DIGIT, []); 1329 reportError(ScannerErrorCode.MISSING_DIGIT, []);
1319 } 1330 }
(...skipping 14 matching lines...) Expand all
1334 done = true; 1345 done = true;
1335 continue LOOP; 1346 continue LOOP;
1336 } else { 1347 } else {
1337 done = true; 1348 done = true;
1338 continue LOOP; 1349 continue LOOP;
1339 } 1350 }
1340 next = advance(); 1351 next = advance();
1341 } 1352 }
1342 if (!hasDigit) { 1353 if (!hasDigit) {
1343 appendStringToken(TokenType.INT, getString(start, -2)); 1354 appendStringToken(TokenType.INT, getString(start, -2));
1344 if (0x2e == next) { 1355 if (0x2E == next) {
1345 return select2(0x2e, TokenType.PERIOD_PERIOD_PERIOD, TokenType.PERIOD_PE RIOD, offset - 1); 1356 return select2(0x2E, TokenType.PERIOD_PERIOD_PERIOD, TokenType.PERIOD_PE RIOD, offset - 1);
1346 } 1357 }
1347 appendToken2(TokenType.PERIOD, offset - 1); 1358 appendToken2(TokenType.PERIOD, offset - 1);
1348 return bigSwitch(next); 1359 return bigSwitch(next);
1349 } 1360 }
1350 if (next == 0x64 || next == 0x44) { 1361 if (next == 0x64 || next == 0x44) {
1351 next = advance(); 1362 next = advance();
1352 } 1363 }
1353 appendStringToken(TokenType.DOUBLE, getString(start, next < 0 ? 0 : -1)); 1364 appendStringToken(TokenType.DOUBLE, getString(start, next < 0 ? 0 : -1));
1354 return next; 1365 return next;
1355 } 1366 }
1356 int tokenizeGreaterThan(int next) { 1367 int tokenizeGreaterThan(int next) {
1357 next = advance(); 1368 next = advance();
1358 if (0x3d == next) { 1369 if (0x3D == next) {
1359 appendToken(TokenType.GT_EQ); 1370 appendToken(TokenType.GT_EQ);
1360 return advance(); 1371 return advance();
1361 } else if (0x3e == next) { 1372 } else if (0x3E == next) {
1362 next = advance(); 1373 next = advance();
1363 if (0x3d == next) { 1374 if (0x3D == next) {
1364 appendToken(TokenType.GT_GT_EQ); 1375 appendToken(TokenType.GT_GT_EQ);
1365 return advance(); 1376 return advance();
1366 } else { 1377 } else {
1367 appendToken(TokenType.GT_GT); 1378 appendToken(TokenType.GT_GT);
1368 return next; 1379 return next;
1369 } 1380 }
1370 } else { 1381 } else {
1371 appendToken(TokenType.GT); 1382 appendToken(TokenType.GT);
1372 return next; 1383 return next;
1373 } 1384 }
(...skipping 16 matching lines...) Expand all
1390 } 1401 }
1391 int tokenizeHexOrNumber(int next) { 1402 int tokenizeHexOrNumber(int next) {
1392 int x = peek(); 1403 int x = peek();
1393 if (x == 0x78 || x == 0x58) { 1404 if (x == 0x78 || x == 0x58) {
1394 advance(); 1405 advance();
1395 return tokenizeHex(x); 1406 return tokenizeHex(x);
1396 } 1407 }
1397 return tokenizeNumber(next); 1408 return tokenizeNumber(next);
1398 } 1409 }
1399 int tokenizeIdentifier(int next, int start, bool allowDollar) { 1410 int tokenizeIdentifier(int next, int start, bool allowDollar) {
1400 while ((0x61 <= next && next <= 0x7a) || (0x41 <= next && next <= 0x5a) || ( 0x30 <= next && next <= 0x39) || next == 0x5f || (next == 0x24 && allowDollar) | | Character.isLetterOrDigit(next)) { 1411 while ((0x61 <= next && next <= 0x7A) || (0x41 <= next && next <= 0x5A) || ( 0x30 <= next && next <= 0x39) || next == 0x5F || (next == 0x24 && allowDollar) | | Character.isLetterOrDigit(next)) {
1401 next = advance(); 1412 next = advance();
1402 } 1413 }
1403 appendStringToken(TokenType.IDENTIFIER, getString(start, next < 0 ? 0 : -1)) ; 1414 appendStringToken(TokenType.IDENTIFIER, getString(start, next < 0 ? 0 : -1)) ;
1404 return next; 1415 return next;
1405 } 1416 }
1406 int tokenizeInterpolatedExpression(int next, int start) { 1417 int tokenizeInterpolatedExpression(int next, int start) {
1407 appendBeginToken(TokenType.STRING_INTERPOLATION_EXPRESSION); 1418 appendBeginToken(TokenType.STRING_INTERPOLATION_EXPRESSION);
1408 next = advance(); 1419 next = advance();
1409 while (next != -1) { 1420 while (next != -1) {
1410 if (next == 0x7d) { 1421 if (next == 0x7D) {
1411 BeginToken begin = findTokenMatchingClosingBraceInInterpolationExpressio n(); 1422 BeginToken begin = findTokenMatchingClosingBraceInInterpolationExpressio n();
1412 if (begin == null) { 1423 if (begin == null) {
1413 beginToken(); 1424 beginToken();
1414 appendToken(TokenType.CLOSE_CURLY_BRACKET); 1425 appendToken(TokenType.CLOSE_CURLY_BRACKET);
1415 next = advance(); 1426 next = advance();
1416 beginToken(); 1427 beginToken();
1417 return next; 1428 return next;
1418 } else if (begin.type == TokenType.OPEN_CURLY_BRACKET) { 1429 } else if (identical(begin.type, TokenType.OPEN_CURLY_BRACKET)) {
1419 beginToken(); 1430 beginToken();
1420 appendEndToken(TokenType.CLOSE_CURLY_BRACKET, TokenType.OPEN_CURLY_BRA CKET); 1431 appendEndToken(TokenType.CLOSE_CURLY_BRACKET, TokenType.OPEN_CURLY_BRA CKET);
1421 next = advance(); 1432 next = advance();
1422 beginToken(); 1433 beginToken();
1423 } else if (begin.type == TokenType.STRING_INTERPOLATION_EXPRESSION) { 1434 } else if (identical(begin.type, TokenType.STRING_INTERPOLATION_EXPRESSI ON)) {
1424 beginToken(); 1435 beginToken();
1425 appendEndToken(TokenType.CLOSE_CURLY_BRACKET, TokenType.STRING_INTERPO LATION_EXPRESSION); 1436 appendEndToken(TokenType.CLOSE_CURLY_BRACKET, TokenType.STRING_INTERPO LATION_EXPRESSION);
1426 next = advance(); 1437 next = advance();
1427 beginToken(); 1438 beginToken();
1428 return next; 1439 return next;
1429 } 1440 }
1430 } else { 1441 } else {
1431 next = bigSwitch(next); 1442 next = bigSwitch(next);
1432 } 1443 }
1433 } 1444 }
1434 if (next == -1) { 1445 if (next == -1) {
1435 return next; 1446 return next;
1436 } 1447 }
1437 next = advance(); 1448 next = advance();
1438 beginToken(); 1449 beginToken();
1439 return next; 1450 return next;
1440 } 1451 }
1441 int tokenizeInterpolatedIdentifier(int next, int start) { 1452 int tokenizeInterpolatedIdentifier(int next, int start) {
1442 appendStringToken2(TokenType.STRING_INTERPOLATION_IDENTIFIER, "\$", 0); 1453 appendStringToken2(TokenType.STRING_INTERPOLATION_IDENTIFIER, "\$", 0);
1443 beginToken(); 1454 beginToken();
1444 next = tokenizeKeywordOrIdentifier(next, false); 1455 next = tokenizeKeywordOrIdentifier(next, false);
1445 beginToken(); 1456 beginToken();
1446 return next; 1457 return next;
1447 } 1458 }
1448 int tokenizeKeywordOrIdentifier(int next, bool allowDollar) { 1459 int tokenizeKeywordOrIdentifier(int next2, bool allowDollar) {
1449 KeywordState state = KeywordState.KEYWORD_STATE; 1460 KeywordState state = KeywordState.KEYWORD_STATE;
1450 int start = offset; 1461 int start = offset;
1451 while (state != null && 0x61 <= next && next <= 0x7a) { 1462 while (state != null && 0x61 <= next2 && next2 <= 0x7A) {
1452 state = state.next(next as int); 1463 state = state.next((next2 as int));
1453 next = advance(); 1464 next2 = advance();
1454 } 1465 }
1455 if (state == null || state.keyword() == null) { 1466 if (state == null || state.keyword() == null) {
1456 return tokenizeIdentifier(next, start, allowDollar); 1467 return tokenizeIdentifier(next2, start, allowDollar);
1457 } 1468 }
1458 if ((0x41 <= next && next <= 0x5a) || (0x30 <= next && next <= 0x39) || next == 0x5f || next == 0x24) { 1469 if ((0x41 <= next2 && next2 <= 0x5A) || (0x30 <= next2 && next2 <= 0x39) || next2 == 0x5F || next2 == 0x24) {
1459 return tokenizeIdentifier(next, start, allowDollar); 1470 return tokenizeIdentifier(next2, start, allowDollar);
1460 } else if (next < 128) { 1471 } else if (next2 < 128) {
1461 appendKeywordToken(state.keyword()); 1472 appendKeywordToken(state.keyword());
1462 return next; 1473 return next2;
1463 } else { 1474 } else {
1464 return tokenizeIdentifier(next, start, allowDollar); 1475 return tokenizeIdentifier(next2, start, allowDollar);
1465 } 1476 }
1466 } 1477 }
1467 int tokenizeLessThan(int next) { 1478 int tokenizeLessThan(int next) {
1468 next = advance(); 1479 next = advance();
1469 if (0x3d == next) { 1480 if (0x3D == next) {
1470 appendToken(TokenType.LT_EQ); 1481 appendToken(TokenType.LT_EQ);
1471 return advance(); 1482 return advance();
1472 } else if (0x3c == next) { 1483 } else if (0x3C == next) {
1473 return select(0x3d, TokenType.LT_LT_EQ, TokenType.LT_LT); 1484 return select(0x3D, TokenType.LT_LT_EQ, TokenType.LT_LT);
1474 } else { 1485 } else {
1475 appendToken(TokenType.LT); 1486 appendToken(TokenType.LT);
1476 return next; 1487 return next;
1477 } 1488 }
1478 } 1489 }
1479 int tokenizeMinus(int next) { 1490 int tokenizeMinus(int next) {
1480 next = advance(); 1491 next = advance();
1481 if (next == 0x2d) { 1492 if (next == 0x2D) {
1482 appendToken(TokenType.MINUS_MINUS); 1493 appendToken(TokenType.MINUS_MINUS);
1483 return advance(); 1494 return advance();
1484 } else if (next == 0x3d) { 1495 } else if (next == 0x3D) {
1485 appendToken(TokenType.MINUS_EQ); 1496 appendToken(TokenType.MINUS_EQ);
1486 return advance(); 1497 return advance();
1487 } else { 1498 } else {
1488 appendToken(TokenType.MINUS); 1499 appendToken(TokenType.MINUS);
1489 return next; 1500 return next;
1490 } 1501 }
1491 } 1502 }
1492 int tokenizeMultiLineComment(int next) { 1503 int tokenizeMultiLineComment(int next) {
1493 int nesting = 1; 1504 int nesting = 1;
1494 next = advance(); 1505 next = advance();
1495 while (true) { 1506 while (true) {
1496 if (-1 == next) { 1507 if (-1 == next) {
1497 reportError(ScannerErrorCode.UNTERMINATED_MULTI_LINE_COMMENT, []); 1508 reportError(ScannerErrorCode.UNTERMINATED_MULTI_LINE_COMMENT, []);
1498 appendCommentToken(TokenType.MULTI_LINE_COMMENT, getString(_tokenStart, 0)); 1509 appendCommentToken(TokenType.MULTI_LINE_COMMENT, getString(_tokenStart, 0));
1499 return next; 1510 return next;
1500 } else if (0x2a == next) { 1511 } else if (0x2A == next) {
1501 next = advance(); 1512 next = advance();
1502 if (0x2f == next) { 1513 if (0x2F == next) {
1503 --nesting; 1514 --nesting;
1504 if (0 == nesting) { 1515 if (0 == nesting) {
1505 appendCommentToken(TokenType.MULTI_LINE_COMMENT, getString(_tokenSta rt, 0)); 1516 appendCommentToken(TokenType.MULTI_LINE_COMMENT, getString(_tokenSta rt, 0));
1506 return advance(); 1517 return advance();
1507 } else { 1518 } else {
1508 next = advance(); 1519 next = advance();
1509 } 1520 }
1510 } 1521 }
1511 } else if (0x2f == next) { 1522 } else if (0x2F == next) {
1512 next = advance(); 1523 next = advance();
1513 if (0x2a == next) { 1524 if (0x2A == next) {
1514 next = advance(); 1525 next = advance();
1515 ++nesting; 1526 ++nesting;
1516 } 1527 }
1517 } else { 1528 } else {
1518 next = advance(); 1529 next = advance();
1519 } 1530 }
1520 } 1531 }
1521 } 1532 }
1522 int tokenizeMultiLineRawString(int quoteChar, int start) { 1533 int tokenizeMultiLineRawString(int quoteChar, int start) {
1523 int next = advance(); 1534 int next = advance();
(...skipping 34 matching lines...) Expand 10 before | Expand all | Expand 10 after
1558 next = advance(); 1569 next = advance();
1559 if (next == quoteChar) { 1570 if (next == quoteChar) {
1560 next = advance(); 1571 next = advance();
1561 if (next == quoteChar) { 1572 if (next == quoteChar) {
1562 appendStringToken(TokenType.STRING, getString(start, 0)); 1573 appendStringToken(TokenType.STRING, getString(start, 0));
1563 return advance(); 1574 return advance();
1564 } 1575 }
1565 } 1576 }
1566 continue; 1577 continue;
1567 } 1578 }
1568 if (next == 0x5c) { 1579 if (next == 0x5C) {
1569 next = advance(); 1580 next = advance();
1570 if (next == -1) { 1581 if (next == -1) {
1571 break; 1582 break;
1572 } 1583 }
1573 } 1584 }
1574 next = advance(); 1585 next = advance();
1575 } 1586 }
1576 reportError(ScannerErrorCode.UNTERMINATED_STRING_LITERAL, []); 1587 reportError(ScannerErrorCode.UNTERMINATED_STRING_LITERAL, []);
1577 appendStringToken(TokenType.STRING, getString(start, 0)); 1588 appendStringToken(TokenType.STRING, getString(start, 0));
1578 return advance(); 1589 return advance();
1579 } 1590 }
1580 int tokenizeMultiply(int next) => select(0x3d, TokenType.STAR_EQ, TokenType.ST AR); 1591 int tokenizeMultiply(int next) => select(0x3D, TokenType.STAR_EQ, TokenType.ST AR);
1581 int tokenizeNumber(int next) { 1592 int tokenizeNumber(int next) {
1582 int start = offset; 1593 int start = offset;
1583 while (true) { 1594 while (true) {
1584 next = advance(); 1595 next = advance();
1585 if (0x30 <= next && next <= 0x39) { 1596 if (0x30 <= next && next <= 0x39) {
1586 continue; 1597 continue;
1587 } else if (next == 0x2e) { 1598 } else if (next == 0x2E) {
1588 return tokenizeFractionPart(advance(), start); 1599 return tokenizeFractionPart(advance(), start);
1589 } else if (next == 0x64 || next == 0x44) { 1600 } else if (next == 0x64 || next == 0x44) {
1590 appendStringToken(TokenType.DOUBLE, getString(start, 0)); 1601 appendStringToken(TokenType.DOUBLE, getString(start, 0));
1591 return advance(); 1602 return advance();
1592 } else if (next == 0x65 || next == 0x45) { 1603 } else if (next == 0x65 || next == 0x45) {
1593 return tokenizeFractionPart(next, start); 1604 return tokenizeFractionPart(next, start);
1594 } else { 1605 } else {
1595 appendStringToken(TokenType.INT, getString(start, next < 0 ? 0 : -1)); 1606 appendStringToken(TokenType.INT, getString(start, next < 0 ? 0 : -1));
1596 return next; 1607 return next;
1597 } 1608 }
1598 } 1609 }
1599 } 1610 }
1600 int tokenizeOpenSquareBracket(int next) { 1611 int tokenizeOpenSquareBracket(int next) {
1601 next = advance(); 1612 next = advance();
1602 if (next == 0x5d) { 1613 if (next == 0x5D) {
1603 return select(0x3d, TokenType.INDEX_EQ, TokenType.INDEX); 1614 return select(0x3D, TokenType.INDEX_EQ, TokenType.INDEX);
1604 } else { 1615 } else {
1605 appendBeginToken(TokenType.OPEN_SQUARE_BRACKET); 1616 appendBeginToken(TokenType.OPEN_SQUARE_BRACKET);
1606 return next; 1617 return next;
1607 } 1618 }
1608 } 1619 }
1609 int tokenizePercent(int next) => select(0x3d, TokenType.PERCENT_EQ, TokenType. PERCENT); 1620 int tokenizePercent(int next) => select(0x3D, TokenType.PERCENT_EQ, TokenType. PERCENT);
1610 int tokenizePlus(int next) { 1621 int tokenizePlus(int next) {
1611 next = advance(); 1622 next = advance();
1612 if (0x2b == next) { 1623 if (0x2B == next) {
1613 appendToken(TokenType.PLUS_PLUS); 1624 appendToken(TokenType.PLUS_PLUS);
1614 return advance(); 1625 return advance();
1615 } else if (0x3d == next) { 1626 } else if (0x3D == next) {
1616 appendToken(TokenType.PLUS_EQ); 1627 appendToken(TokenType.PLUS_EQ);
1617 return advance(); 1628 return advance();
1618 } else { 1629 } else {
1619 appendToken(TokenType.PLUS); 1630 appendToken(TokenType.PLUS);
1620 return next; 1631 return next;
1621 } 1632 }
1622 } 1633 }
1623 int tokenizeSingleLineComment(int next) { 1634 int tokenizeSingleLineComment(int next) {
1624 while (true) { 1635 while (true) {
1625 next = advance(); 1636 next = advance();
1626 if (0xa == next || 0xd == next || -1 == next) { 1637 if (0xA == next || 0xD == next || -1 == next) {
1627 appendCommentToken(TokenType.SINGLE_LINE_COMMENT, getString(_tokenStart, 0)); 1638 appendCommentToken(TokenType.SINGLE_LINE_COMMENT, getString(_tokenStart, 0));
1628 return next; 1639 return next;
1629 } 1640 }
1630 } 1641 }
1631 } 1642 }
1632 int tokenizeSingleLineRawString(int next, int quoteChar, int start) { 1643 int tokenizeSingleLineRawString(int next, int quoteChar, int start) {
1633 next = advance(); 1644 next = advance();
1634 while (next != -1) { 1645 while (next != -1) {
1635 if (next == quoteChar) { 1646 if (next == quoteChar) {
1636 appendStringToken(TokenType.STRING, getString(start, 0)); 1647 appendStringToken(TokenType.STRING, getString(start, 0));
1637 return advance(); 1648 return advance();
1638 } else if (next == 0xd || next == 0xa) { 1649 } else if (next == 0xD || next == 0xA) {
1639 reportError(ScannerErrorCode.UNTERMINATED_STRING_LITERAL, []); 1650 reportError(ScannerErrorCode.UNTERMINATED_STRING_LITERAL, []);
1640 appendStringToken(TokenType.STRING, getString(start, 0)); 1651 appendStringToken(TokenType.STRING, getString(start, 0));
1641 return advance(); 1652 return advance();
1642 } 1653 }
1643 next = advance(); 1654 next = advance();
1644 } 1655 }
1645 reportError(ScannerErrorCode.UNTERMINATED_STRING_LITERAL, []); 1656 reportError(ScannerErrorCode.UNTERMINATED_STRING_LITERAL, []);
1646 appendStringToken(TokenType.STRING, getString(start, 0)); 1657 appendStringToken(TokenType.STRING, getString(start, 0));
1647 return advance(); 1658 return advance();
1648 } 1659 }
1649 int tokenizeSingleLineString(int next, int quoteChar, int start) { 1660 int tokenizeSingleLineString(int next, int quoteChar, int start) {
1650 while (next != quoteChar) { 1661 while (next != quoteChar) {
1651 if (next == 0x5c) { 1662 if (next == 0x5C) {
1652 next = advance(); 1663 next = advance();
1653 } else if (next == 0x24) { 1664 } else if (next == 0x24) {
1654 appendStringToken(TokenType.STRING, getString(start, -1)); 1665 appendStringToken(TokenType.STRING, getString(start, -1));
1655 beginToken(); 1666 beginToken();
1656 next = tokenizeStringInterpolation(start); 1667 next = tokenizeStringInterpolation(start);
1657 start = offset; 1668 start = offset;
1658 continue; 1669 continue;
1659 } 1670 }
1660 if (next <= 0xd && (next == 0xa || next == 0xd || next == -1)) { 1671 if (next <= 0xD && (next == 0xA || next == 0xD || next == -1)) {
1661 reportError(ScannerErrorCode.UNTERMINATED_STRING_LITERAL, []); 1672 reportError(ScannerErrorCode.UNTERMINATED_STRING_LITERAL, []);
1662 appendStringToken(TokenType.STRING, getString(start, 0)); 1673 appendStringToken(TokenType.STRING, getString(start, 0));
1663 return advance(); 1674 return advance();
1664 } 1675 }
1665 next = advance(); 1676 next = advance();
1666 } 1677 }
1667 appendStringToken(TokenType.STRING, getString(start, 0)); 1678 appendStringToken(TokenType.STRING, getString(start, 0));
1668 return advance(); 1679 return advance();
1669 } 1680 }
1670 int tokenizeSlashOrComment(int next) { 1681 int tokenizeSlashOrComment(int next) {
1671 next = advance(); 1682 next = advance();
1672 if (0x2a == next) { 1683 if (0x2A == next) {
1673 return tokenizeMultiLineComment(next); 1684 return tokenizeMultiLineComment(next);
1674 } else if (0x2f == next) { 1685 } else if (0x2F == next) {
1675 return tokenizeSingleLineComment(next); 1686 return tokenizeSingleLineComment(next);
1676 } else if (0x3d == next) { 1687 } else if (0x3D == next) {
1677 appendToken(TokenType.SLASH_EQ); 1688 appendToken(TokenType.SLASH_EQ);
1678 return advance(); 1689 return advance();
1679 } else { 1690 } else {
1680 appendToken(TokenType.SLASH); 1691 appendToken(TokenType.SLASH);
1681 return next; 1692 return next;
1682 } 1693 }
1683 } 1694 }
1684 int tokenizeString(int next, int start, bool raw) { 1695 int tokenizeString(int next, int start, bool raw) {
1685 int quoteChar = next; 1696 int quoteChar = next;
1686 next = advance(); 1697 next = advance();
1687 if (quoteChar == next) { 1698 if (quoteChar == next) {
1688 next = advance(); 1699 next = advance();
1689 if (quoteChar == next) { 1700 if (quoteChar == next) {
1690 return tokenizeMultiLineString(quoteChar, start, raw); 1701 return tokenizeMultiLineString(quoteChar, start, raw);
1691 } else { 1702 } else {
1692 appendStringToken(TokenType.STRING, getString(start, -1)); 1703 appendStringToken(TokenType.STRING, getString(start, -1));
1693 return next; 1704 return next;
1694 } 1705 }
1695 } 1706 }
1696 if (raw) { 1707 if (raw) {
1697 return tokenizeSingleLineRawString(next, quoteChar, start); 1708 return tokenizeSingleLineRawString(next, quoteChar, start);
1698 } else { 1709 } else {
1699 return tokenizeSingleLineString(next, quoteChar, start); 1710 return tokenizeSingleLineString(next, quoteChar, start);
1700 } 1711 }
1701 } 1712 }
1702 int tokenizeStringInterpolation(int start) { 1713 int tokenizeStringInterpolation(int start) {
1703 beginToken(); 1714 beginToken();
1704 int next = advance(); 1715 int next = advance();
1705 if (next == 0x7b) { 1716 if (next == 0x7B) {
1706 return tokenizeInterpolatedExpression(next, start); 1717 return tokenizeInterpolatedExpression(next, start);
1707 } else { 1718 } else {
1708 return tokenizeInterpolatedIdentifier(next, start); 1719 return tokenizeInterpolatedIdentifier(next, start);
1709 } 1720 }
1710 } 1721 }
1711 int tokenizeTag(int next) { 1722 int tokenizeTag(int next) {
1712 if (offset == 0) { 1723 if (offset == 0) {
1713 if (peek() == 0x21) { 1724 if (peek() == 0x21) {
1714 do { 1725 do {
1715 next = advance(); 1726 next = advance();
1716 } while (next != 0xa && next != 0xd && next > 0); 1727 } while (next != 0xA && next != 0xD && next > 0);
1717 appendStringToken(TokenType.SCRIPT_TAG, getString(_tokenStart, 0)); 1728 appendStringToken(TokenType.SCRIPT_TAG, getString(_tokenStart, 0));
1718 return next; 1729 return next;
1719 } 1730 }
1720 } 1731 }
1721 appendToken(TokenType.HASH); 1732 appendToken(TokenType.HASH);
1722 return advance(); 1733 return advance();
1723 } 1734 }
1724 int tokenizeTilde(int next) { 1735 int tokenizeTilde(int next) {
1725 next = advance(); 1736 next = advance();
1726 if (next == 0x2f) { 1737 if (next == 0x2F) {
1727 return select(0x3d, TokenType.TILDE_SLASH_EQ, TokenType.TILDE_SLASH); 1738 return select(0x3D, TokenType.TILDE_SLASH_EQ, TokenType.TILDE_SLASH);
1728 } else { 1739 } else {
1729 appendToken(TokenType.TILDE); 1740 appendToken(TokenType.TILDE);
1730 return next; 1741 return next;
1731 } 1742 }
1732 } 1743 }
1733 } 1744 }
/**
 * Instances of the class {@code KeywordTokenWithComment} implement a keyword token that is
 * preceded by comments.
 */
(...skipping 32 matching lines...) Expand 10 before | Expand all | Expand 10 after
 * strings in the given array of strings starting at the given offset and having the given length.
 * All of these strings have a common prefix and the next character is at the given start index.
 * @param start the index of the character in the strings used to transition to a new state
 * @param strings an array containing all of the strings that will be recognized by the state
 *   machine
 * @param offset the offset of the first string in the array that has the prefix that is assumed
 *   to have been recognized by the time we reach the state being built
 * @param length the number of strings in the array that pass through the state being built
 * @return the state that was created
 */
  static KeywordState computeKeywordStateTable(int start, List<String> strings, int offset, int length12) {
    // One transition slot per lowercase ASCII letter; entries are indexed by
    // (character - 0x61), i.e. offset from 'a'.
    List<KeywordState> result = new List<KeywordState>.fixedLength(26);
    assert(length12 != 0);
    int chunk = 0x0;     // character currently grouping a run of strings at this depth
    int chunkStart = -1; // index of the first string in the current run, -1 if none yet
    bool isLeaf = false; // true if some string ends exactly at depth 'start'
    for (int i = offset; i < offset + length12; i++) {
      if (strings[i].length == start) {
        isLeaf = true;
      }
      if (strings[i].length > start) {
        int c = strings[i].codeUnitAt(start);
        if (chunk != c) {
          // Character changed: the previous run (if any) is complete, so
          // recursively build the sub-state it transitions to.
          // NOTE(review): runs are only contiguous if 'strings' is sorted —
          // confirm at the call site.
          if (chunkStart != -1) {
            result[chunk - 0x61] = computeKeywordStateTable(start + 1, strings, chunkStart, i - chunkStart);
          }
          chunkStart = i;
          chunk = c;
        }
      }
    }
    if (chunkStart != -1) {
      // Build the sub-state for the final run of strings.
      assert(result[chunk - 0x61] == null);
      result[chunk - 0x61] = computeKeywordStateTable(start + 1, strings, chunkStart, offset + length12 - chunkStart);
    } else {
      // No string extends past this depth: exactly one keyword terminates here,
      // so emit a leaf state with no outgoing transitions.
      assert(length12 == 1);
      return new KeywordState(_EMPTY_TABLE, strings[offset]);
    }
    if (isLeaf) {
      // A keyword ends here, but longer keywords share this prefix.
      return new KeywordState(result, strings[offset]);
    } else {
      return new KeywordState(result, null);
    }
  }
1814 /** 1825 /**
1815 * Create the initial state in the state machine. 1826 * Create the initial state in the state machine.
(...skipping 34 matching lines...) Expand 10 before | Expand all | Expand 10 after
1850 * @return the keyword that was matched by reaching this state 1861 * @return the keyword that was matched by reaching this state
1851 */ 1862 */
1852 Keyword keyword() => _keyword2; 1863 Keyword keyword() => _keyword2;
/**
 * Return the state that follows this state on a transition of the given character, or
 * {@code null} if there is no valid state reachable from this state with such a transition.
 * @param c the character used to transition from this state to another state
 * @return the state that follows this state on a transition of the given character
 */
1858 KeywordState next(int c) => _table[c - 0x61]; 1869 KeywordState next(int c) => _table[c - 0x61];
1859 } 1870 }
OLDNEW

Powered by Google App Engine
This is Rietveld 408576698