Chromium Code Reviews
chromiumcodereview-hr@appspot.gserviceaccount.com (chromiumcodereview-hr) | Please choose your nickname with Settings | Help | Chromium Project | Gerrit Changes | Sign out
(118)

Side by Side Diff: analyzer/lib/src/generated/scanner.dart

Issue 1400473008: Roll Observatory packages and add a roll script (Closed) Base URL: git@github.com:dart-lang/observatory_pub_packages.git@master
Patch Set: Created 5 years, 2 months ago
Use n/p to move between diff chunks; N/P to move between comments. Draft comments are only viewable by you.
Jump to:
View unified diff | Download patch
« no previous file with comments | « analyzer/lib/src/generated/resolver.dart ('k') | analyzer/lib/src/generated/sdk.dart » ('j') | no next file with comments »
Toggle Intra-line Diffs ('i') | Expand Comments ('e') | Collapse Comments ('c') | Show Comments Hide Comments ('s')
OLDNEW
(Empty)
1 // Copyright (c) 2014, the Dart project authors. Please see the AUTHORS file
2 // for details. All rights reserved. Use of this source code is governed by a
3 // BSD-style license that can be found in the LICENSE file.
4
5 library engine.scanner;
6
7 import 'dart:collection';
8
9 import 'error.dart';
10 import 'java_engine.dart';
11 import 'source.dart';
12
/**
 * The opening half of a grouping pair of tokens. This is used for curly
 * brackets ('{'), parentheses ('('), square brackets ('['), and string
 * interpolation expressions ('${').
 */
class BeginToken extends Token {
  /**
   * The closing token that matches this opening token, or `null` if the
   * matching end token has not (yet) been found.
   */
  Token endToken;

  /**
   * Initialize a newly created token to have the given [type] at the given
   * [offset]. The [type] must be one of the grouping-open token types (see
   * the assertion below).
   */
  BeginToken(TokenType type, int offset) : super(type, offset) {
    assert(type == TokenType.OPEN_CURLY_BRACKET ||
        type == TokenType.OPEN_PAREN ||
        type == TokenType.OPEN_SQUARE_BRACKET ||
        type == TokenType.STRING_INTERPOLATION_EXPRESSION);
  }

  // Note: the copy does not preserve [endToken]; pairing must be
  // re-established by whoever rebuilds the token stream.
  @override
  Token copy() => new BeginToken(type, offset);
}
37
/**
 * A begin token that carries the comments that precede it.
 */
class BeginTokenWithComment extends BeginToken {
  /**
   * The first comment in the list of comments that precede this token.
   */
  CommentToken _precedingComment;

  /**
   * Initialize a newly created token to have the given [type] at the given
   * [offset] and to be preceded by the comments reachable from the given
   * [comment].
   */
  BeginTokenWithComment(TokenType type, int offset, this._precedingComment)
      : super(type, offset) {
    _setCommentParent(_precedingComment);
  }

  CommentToken get precedingComments => _precedingComment;

  void set precedingComments(CommentToken comment) {
    _precedingComment = comment;
    _setCommentParent(_precedingComment);
  }

  @override
  void applyDelta(int delta) {
    super.applyDelta(delta);
    // Shift every comment in the preceding-comment chain by the same delta.
    for (Token comment = precedingComments;
        comment != null;
        comment = comment.next) {
      comment.applyDelta(delta);
    }
  }

  @override
  Token copy() {
    return new BeginTokenWithComment(
        type, offset, copyComments(precedingComments));
  }
}
78
/**
 * A [CharacterReader] that reads a range of characters from another character
 * reader.
 */
class CharacterRangeReader extends CharacterReader {
  /**
   * The reader from which the characters are actually being read.
   */
  final CharacterReader baseReader;

  /**
   * The index past the last character to be read; that is, the end of the
   * range is exclusive (see the `>= endIndex` checks in [advance] and
   * [peek]).
   */
  final int endIndex;

  /**
   * Initialize a newly created reader to read the characters from the given
   * [baseReader] between the [startIndex] inclusive to [endIndex] exclusive.
   */
  CharacterRangeReader(this.baseReader, int startIndex, this.endIndex) {
    // Position the base reader one before the start so that the first call
    // to [advance] returns the character at [startIndex].
    baseReader.offset = startIndex - 1;
  }

  @override
  int get offset => baseReader.offset;

  @override
  void set offset(int offset) {
    baseReader.offset = offset;
  }

  @override
  int advance() {
    if (baseReader.offset + 1 >= endIndex) {
      // The end of the range has been reached.
      return -1;
    }
    return baseReader.advance();
  }

  @override
  String getString(int start, int endDelta) =>
      baseReader.getString(start, endDelta);

  @override
  int peek() {
    if (baseReader.offset + 1 >= endIndex) {
      return -1;
    }
    return baseReader.peek();
  }
}
130
/**
 * An object used by the scanner to read the characters to be scanned.
 */
abstract class CharacterReader {
  /**
   * The current offset relative to the beginning of the source. Return the
   * initial offset if the scanner has not yet scanned the source code, and
   * one (1) past the end of the source code if the entire source code has
   * been scanned.
   */
  int get offset;

  /**
   * Set the current offset relative to the beginning of the source to the
   * given [offset]. The new offset must be between the initial offset and
   * one (1) past the end of the source code.
   */
  void set offset(int offset);

  /**
   * Advance the current position and return the character at the new current
   * position.
   */
  int advance();

  /**
   * Return the substring of the source code between the [start] offset and
   * the modified current position. The current position is modified by adding
   * the [endDelta], which is the number of characters after the current
   * location to be included in the string, or the number of characters before
   * the current location to be excluded if the offset is negative.
   */
  String getString(int start, int endDelta);

  /**
   * Return the character at the current position without changing the current
   * position.
   */
  int peek();
}
171
/**
 * A [CharacterReader] that reads characters from a character sequence.
 */
class CharSequenceReader implements CharacterReader {
  /**
   * The sequence from which characters will be read.
   */
  final String _sequence;

  /**
   * The number of characters in the string.
   */
  int _stringLength = 0;

  /**
   * The index, relative to the string, of the last character that was read.
   */
  int _charOffset = 0;

  /**
   * Initialize a newly created reader to read the characters in the given
   * [_sequence].
   */
  CharSequenceReader(this._sequence) {
    this._stringLength = _sequence.length;
    // Start one before the first character so the first [advance] reads
    // index 0.
    this._charOffset = -1;
  }

  @override
  int get offset => _charOffset;

  @override
  void set offset(int offset) {
    _charOffset = offset;
  }

  @override
  int advance() {
    if (_charOffset + 1 >= _stringLength) {
      // The end of the sequence has been reached.
      return -1;
    }
    return _sequence.codeUnitAt(++_charOffset);
  }

  // [String.substring] already returns a [String], so the redundant
  // `toString()` call present previously has been removed.
  @override
  String getString(int start, int endDelta) =>
      _sequence.substring(start, _charOffset + 1 + endDelta);

  @override
  int peek() {
    if (_charOffset + 1 >= _stringLength) {
      return -1;
    }
    return _sequence.codeUnitAt(_charOffset + 1);
  }
}
228
/**
 * A token representing a comment.
 */
class CommentToken extends StringToken {
  /**
   * The non-comment [Token] to which this comment is attached, or `null` if
   * the comment has not been attached to a token.
   */
  Token parent;

  /**
   * Initialize a newly created token to represent a token of the given [type]
   * with the given [value] at the given [offset].
   */
  CommentToken(TokenType type, String value, int offset)
      : super(type, value, offset);

  // Note: the copy does not preserve [parent]; it is re-established when the
  // copy is attached to a token.
  @override
  CommentToken copy() => new CommentToken(type, _value, offset);
}
248
/**
 * A documentation comment token.
 */
class DocumentationCommentToken extends CommentToken {
  /**
   * The references embedded within the documentation comment.
   * This list will be empty unless this is a documentation comment that has
   * references embedded within it.
   */
  final List<Token> references = <Token>[];

  /**
   * Initialize a newly created token to represent a token of the given [type]
   * with the given [value] at the given [offset].
   */
  DocumentationCommentToken(TokenType type, String value, int offset)
      : super(type, value, offset);

  @override
  CommentToken copy() {
    DocumentationCommentToken copy =
        new DocumentationCommentToken(type, _value, offset);
    // Preserve the embedded references: the previous implementation returned
    // a copy with an empty [references] list, silently losing the reference
    // tokens recorded during scanning.
    references.forEach(copy.references.add);
    return copy;
  }
}
270
/**
 * The keywords in the Dart programming language.
 */
class Keyword {
  // Reserved words. These cannot be used as identifiers.
  static const Keyword ASSERT = const Keyword('ASSERT', "assert");
  static const Keyword BREAK = const Keyword('BREAK', "break");
  static const Keyword CASE = const Keyword('CASE', "case");
  static const Keyword CATCH = const Keyword('CATCH', "catch");
  static const Keyword CLASS = const Keyword('CLASS', "class");
  static const Keyword CONST = const Keyword('CONST', "const");
  static const Keyword CONTINUE = const Keyword('CONTINUE', "continue");
  static const Keyword DEFAULT = const Keyword('DEFAULT', "default");
  static const Keyword DO = const Keyword('DO', "do");
  static const Keyword ELSE = const Keyword('ELSE', "else");
  static const Keyword ENUM = const Keyword('ENUM', "enum");
  static const Keyword EXTENDS = const Keyword('EXTENDS', "extends");
  static const Keyword FALSE = const Keyword('FALSE', "false");
  static const Keyword FINAL = const Keyword('FINAL', "final");
  static const Keyword FINALLY = const Keyword('FINALLY', "finally");
  static const Keyword FOR = const Keyword('FOR', "for");
  static const Keyword IF = const Keyword('IF', "if");
  static const Keyword IN = const Keyword('IN', "in");
  static const Keyword IS = const Keyword('IS', "is");
  static const Keyword NEW = const Keyword('NEW', "new");
  static const Keyword NULL = const Keyword('NULL', "null");
  static const Keyword RETHROW = const Keyword('RETHROW', "rethrow");
  static const Keyword RETURN = const Keyword('RETURN', "return");
  static const Keyword SUPER = const Keyword('SUPER', "super");
  static const Keyword SWITCH = const Keyword('SWITCH', "switch");
  static const Keyword THIS = const Keyword('THIS', "this");
  static const Keyword THROW = const Keyword('THROW', "throw");
  static const Keyword TRUE = const Keyword('TRUE', "true");
  static const Keyword TRY = const Keyword('TRY', "try");
  static const Keyword VAR = const Keyword('VAR', "var");
  static const Keyword VOID = const Keyword('VOID', "void");
  static const Keyword WHILE = const Keyword('WHILE', "while");
  static const Keyword WITH = const Keyword('WITH', "with");

  // Built-in identifiers (pseudo-keywords). These CAN be used as
  // identifiers; note the third constructor argument.
  static const Keyword ABSTRACT = const Keyword('ABSTRACT', "abstract", true);
  static const Keyword AS = const Keyword('AS', "as", true);
  static const Keyword DEFERRED = const Keyword('DEFERRED', "deferred", true);
  static const Keyword DYNAMIC = const Keyword('DYNAMIC', "dynamic", true);
  static const Keyword EXPORT = const Keyword('EXPORT', "export", true);
  static const Keyword EXTERNAL = const Keyword('EXTERNAL', "external", true);
  static const Keyword FACTORY = const Keyword('FACTORY', "factory", true);
  static const Keyword GET = const Keyword('GET', "get", true);
  static const Keyword IMPLEMENTS =
      const Keyword('IMPLEMENTS', "implements", true);
  static const Keyword IMPORT = const Keyword('IMPORT', "import", true);
  static const Keyword LIBRARY = const Keyword('LIBRARY', "library", true);
  static const Keyword OPERATOR = const Keyword('OPERATOR', "operator", true);
  static const Keyword PART = const Keyword('PART', "part", true);
  static const Keyword SET = const Keyword('SET', "set", true);
  static const Keyword STATIC = const Keyword('STATIC', "static", true);
  static const Keyword TYPEDEF = const Keyword('TYPEDEF', "typedef", true);

  /**
   * A list of all of the keywords, reserved words first, then
   * pseudo-keywords.
   */
  static const List<Keyword> values = const [
    ASSERT,
    BREAK,
    CASE,
    CATCH,
    CLASS,
    CONST,
    CONTINUE,
    DEFAULT,
    DO,
    ELSE,
    ENUM,
    EXTENDS,
    FALSE,
    FINAL,
    FINALLY,
    FOR,
    IF,
    IN,
    IS,
    NEW,
    NULL,
    RETHROW,
    RETURN,
    SUPER,
    SWITCH,
    THIS,
    THROW,
    TRUE,
    TRY,
    VAR,
    VOID,
    WHILE,
    WITH,
    ABSTRACT,
    AS,
    DEFERRED,
    DYNAMIC,
    EXPORT,
    EXTERNAL,
    FACTORY,
    GET,
    IMPLEMENTS,
    IMPORT,
    LIBRARY,
    OPERATOR,
    PART,
    SET,
    STATIC,
    TYPEDEF
  ];

  /**
   * A table mapping the lexemes of keywords to the corresponding keyword.
   */
  static final Map<String, Keyword> keywords = _createKeywordMap();

  /**
   * The name of the keyword type.
   */
  final String name;

  /**
   * The lexeme for the keyword.
   */
  final String syntax;

  /**
   * A flag indicating whether the keyword is a pseudo-keyword. Pseudo
   * keywords can be used as identifiers.
   */
  final bool isPseudoKeyword;

  /**
   * Initialize a newly created keyword to have the given [name] and [syntax].
   * The keyword is a pseudo-keyword if the [isPseudoKeyword] flag is `true`.
   */
  const Keyword(this.name, this.syntax, [this.isPseudoKeyword = false]);

  @override
  String toString() => name;

  /**
   * Create a table mapping the lexemes of keywords to the corresponding
   * keyword and return the table that was created.
   */
  static Map<String, Keyword> _createKeywordMap() {
    LinkedHashMap<String, Keyword> result =
        new LinkedHashMap<String, Keyword>();
    for (Keyword keyword in values) {
      result[keyword.syntax] = keyword;
    }
    return result;
  }
}
469
/**
 * A state in a state machine (a trie over the keyword lexemes) used to scan
 * keywords.
 */
class KeywordState {
  /**
   * An empty transition table used by leaf states. (26 slots, one per
   * lowercase ASCII letter.)
   */
  static List<KeywordState> _EMPTY_TABLE = new List<KeywordState>(26);

  /**
   * The initial state in the state machine.
   */
  static final KeywordState KEYWORD_STATE = _createKeywordStateTable();

  /**
   * A table mapping characters to the states to which those characters will
   * transition. (The index into the array is the offset from the character
   * `'a'` to the transitioning character.)
   */
  final List<KeywordState> _table;

  /**
   * The keyword that is recognized by this state, or `null` if this state is
   * not a terminal state.
   */
  Keyword _keyword;

  /**
   * Initialize a newly created state to have the given transitions and to
   * recognize the keyword with the given [syntax].
   */
  KeywordState(this._table, String syntax) {
    this._keyword = (syntax == null) ? null : Keyword.keywords[syntax];
  }

  /**
   * Return the keyword that was recognized by this state, or `null` if this
   * state does not recognize a keyword.
   */
  Keyword keyword() => _keyword;

  /**
   * Return the state that follows this state on a transition of the given
   * [character], or `null` if there is no valid state reachable from this
   * state with such a transition. (0x61 is 'a'.)
   */
  KeywordState next(int character) => _table[character - 0x61];

  /**
   * Create the next state in the state machine where we have already
   * recognized the subset of strings in the given array of [strings] starting
   * at the given [offset] and having the given [length]. All of these strings
   * have a common prefix and the next character is at the given [start]
   * index.
   */
  static KeywordState _computeKeywordStateTable(
      int start, List<String> strings, int offset, int length) {
    List<KeywordState> result = new List<KeywordState>(26);
    assert(length != 0);
    int chunk = 0x0;
    int chunkStart = -1;
    bool isLeaf = false;
    for (int i = offset; i < offset + length; i++) {
      if (strings[i].length == start) {
        // One of the strings ends exactly at this depth, so this state is
        // terminal for that string.
        isLeaf = true;
      }
      if (strings[i].length > start) {
        int c = strings[i].codeUnitAt(start);
        if (chunk != c) {
          if (chunkStart != -1) {
            // Recursively build the sub-table for the previous run of
            // strings that share the same character at index [start].
            result[chunk - 0x61] = _computeKeywordStateTable(
                start + 1, strings, chunkStart, i - chunkStart);
          }
          chunkStart = i;
          chunk = c;
        }
      }
    }
    if (chunkStart != -1) {
      // Build the sub-table for the final run of strings.
      assert(result[chunk - 0x61] == null);
      result[chunk - 0x61] = _computeKeywordStateTable(
          start + 1, strings, chunkStart, offset + length - chunkStart);
    } else {
      // No string extends past [start], so this must be a leaf state for a
      // single keyword.
      assert(length == 1);
      return new KeywordState(_EMPTY_TABLE, strings[offset]);
    }
    if (isLeaf) {
      return new KeywordState(result, strings[offset]);
    } else {
      return new KeywordState(result, null);
    }
  }

  /**
   * Create and return the initial state in the state machine.
   */
  static KeywordState _createKeywordStateTable() {
    List<Keyword> values = Keyword.values;
    List<String> strings = new List<String>(values.length);
    for (int i = 0; i < values.length; i++) {
      strings[i] = values[i].syntax;
    }
    // The table-construction algorithm requires the lexemes to be sorted so
    // that strings sharing a prefix form contiguous runs.
    strings.sort();
    return _computeKeywordStateTable(0, strings, 0, strings.length);
  }
}
575
/**
 * A token representing a keyword in the language.
 */
class KeywordToken extends Token {
  /**
   * The keyword being represented by this token.
   */
  final Keyword keyword;

  /**
   * Initialize a newly created token to represent the given [keyword] at the
   * given [offset].
   */
  KeywordToken(this.keyword, int offset) : super(TokenType.KEYWORD, offset);

  // The lexeme is derived from the keyword rather than stored separately.
  @override
  String get lexeme => keyword.syntax;

  @override
  Token copy() => new KeywordToken(keyword, offset);

  @override
  Keyword value() => keyword;
}
600
/**
 * A keyword token that carries the comments that precede it.
 */
class KeywordTokenWithComment extends KeywordToken {
  /**
   * The first comment in the list of comments that precede this token.
   */
  CommentToken _precedingComment;

  /**
   * Initialize a newly created token to represent the given [keyword] at the
   * given [offset] and to be preceded by the comments reachable from the
   * given [comment].
   */
  KeywordTokenWithComment(Keyword keyword, int offset, this._precedingComment)
      : super(keyword, offset) {
    _setCommentParent(_precedingComment);
  }

  CommentToken get precedingComments => _precedingComment;

  void set precedingComments(CommentToken comment) {
    _precedingComment = comment;
    _setCommentParent(_precedingComment);
  }

  @override
  void applyDelta(int delta) {
    super.applyDelta(delta);
    // Shift every comment in the preceding-comment chain by the same delta.
    for (Token comment = precedingComments;
        comment != null;
        comment = comment.next) {
      comment.applyDelta(delta);
    }
  }

  @override
  Token copy() {
    return new KeywordTokenWithComment(
        keyword, offset, copyComments(precedingComments));
  }
}
641
/**
 * The class `Scanner` implements a scanner for Dart code.
 *
 * The lexical structure of Dart is ambiguous without knowledge of the context
 * in which a token is being scanned. For example, without context we cannot
 * determine whether source of the form "<<" should be scanned as a single
 * left-shift operator or as two left angle brackets. This scanner does not
 * have any context, so it always resolves such conflicts by scanning the
 * longest possible token.
 */
class Scanner {
  /**
   * The source being scanned.
   */
  final Source source;

  /**
   * The reader used to access the characters in the source.
   */
  final CharacterReader _reader;

  /**
   * The error listener that will be informed of any errors that are found
   * during the scan.
   */
  final AnalysisErrorListener _errorListener;

  /**
   * The flag specifying whether documentation comments should be parsed.
   */
  bool _preserveComments = true;

  /**
   * The token pointing to the head of the linked list of tokens. This is a
   * sentinel EOF token; the first real token is its successor (see
   * [firstToken]).
   */
  Token _tokens;

  /**
   * The last token that was scanned.
   */
  Token _tail;

  /**
   * The first token in the list of comment tokens found since the last
   * non-comment token.
   */
  Token _firstComment;

  /**
   * The last token in the list of comment tokens found since the last
   * non-comment token.
   */
  Token _lastComment;

  /**
   * The index of the first character of the current token.
   */
  int _tokenStart = 0;

  /**
   * A list containing the offsets of the first character of each line in the
   * source code.
   */
  List<int> _lineStarts = new List<int>();

  /**
   * A list, treated something like a stack, of tokens representing the
   * beginning of a matched pair. It is used to pair the end tokens with the
   * begin tokens.
   */
  List<BeginToken> _groupingStack = new List<BeginToken>();

  /**
   * The index of the last item in the [_groupingStack], or `-1` if the stack
   * is empty.
   */
  int _stackEnd = -1;

  /**
   * A flag indicating whether any unmatched groups were found during the
   * parse.
   */
  bool _hasUnmatchedGroups = false;
724
  /**
   * Initialize a newly created scanner to scan characters from the given
   * [source]. The given character [_reader] will be used to read the
   * characters in the source. The given [_errorListener] will be informed of
   * any errors that are found.
   */
  Scanner(this.source, this._reader, this._errorListener) {
    _tokens = new Token(TokenType.EOF, -1);
    // The sentinel head points to itself so the list has a well-defined
    // successor even before any tokens have been appended.
    _tokens.setNext(_tokens);
    _tail = _tokens;
    _tokenStart = -1;
    // The first line always starts at offset zero.
    _lineStarts.add(0);
  }
738
  /**
   * Return the first token in the token stream that was scanned (the
   * successor of the sentinel head token).
   */
  Token get firstToken => _tokens.next;

  /**
   * Return `true` if any unmatched groups were found during the parse.
   */
  bool get hasUnmatchedGroups => _hasUnmatchedGroups;

  /**
   * Return an array containing the offsets of the first character of each
   * line in the source code.
   */
  List<int> get lineStarts => _lineStarts;

  /**
   * Set whether documentation tokens should be preserved.
   */
  void set preserveComments(bool preserveComments) {
    this._preserveComments = preserveComments;
  }

  /**
   * Return the last token that was scanned.
   */
  Token get tail => _tail;
766
  /**
   * Append the given [token] to the end of the token stream being scanned.
   * This method is intended to be used by subclasses that copy existing
   * tokens and should not normally be used because it will fail to correctly
   * associate any comments with the token being passed in.
   */
  void appendToken(Token token) {
    _tail = _tail.setNext(token);
  }
776
  /**
   * Dispatch on the given character [next]: skip whitespace, record line
   * starts, and scan a single token, returning the next character to be
   * processed (or `-1` at end of input). This is the main per-character
   * dispatch of the scanner; [tokenize] calls it in a loop.
   */
  int bigSwitch(int next) {
    _beginToken();
    if (next == 0xD) {
      // '\r'
      next = _reader.advance();
      if (next == 0xA) {
        // '\n' — treat "\r\n" as a single line terminator.
        next = _reader.advance();
      }
      recordStartOfLine();
      return next;
    } else if (next == 0xA) {
      // '\n'
      next = _reader.advance();
      recordStartOfLine();
      return next;
    } else if (next == 0x9 || next == 0x20) {
      // '\t' || ' '
      return _reader.advance();
    }
    if (next == 0x72) {
      // 'r' — possibly the start of a raw string literal.
      int peek = _reader.peek();
      if (peek == 0x22 || peek == 0x27) {
        // '"' || "'"
        int start = _reader.offset;
        return _tokenizeString(_reader.advance(), start, true);
      }
    }
    if (0x61 <= next && next <= 0x7A) {
      // 'a'-'z'
      return _tokenizeKeywordOrIdentifier(next, true);
    }
    if ((0x41 <= next && next <= 0x5A) || next == 0x5F || next == 0x24) {
      // 'A'-'Z' || '_' || '$'
      return _tokenizeIdentifier(next, _reader.offset, true);
    }
    if (next == 0x3C) {
      // '<'
      return _tokenizeLessThan(next);
    }
    if (next == 0x3E) {
      // '>'
      return _tokenizeGreaterThan(next);
    }
    if (next == 0x3D) {
      // '='
      return _tokenizeEquals(next);
    }
    if (next == 0x21) {
      // '!'
      return _tokenizeExclamation(next);
    }
    if (next == 0x2B) {
      // '+'
      return _tokenizePlus(next);
    }
    if (next == 0x2D) {
      // '-'
      return _tokenizeMinus(next);
    }
    if (next == 0x2A) {
      // '*'
      return _tokenizeMultiply(next);
    }
    if (next == 0x25) {
      // '%'
      return _tokenizePercent(next);
    }
    if (next == 0x26) {
      // '&'
      return _tokenizeAmpersand(next);
    }
    if (next == 0x7C) {
      // '|'
      return _tokenizeBar(next);
    }
    if (next == 0x5E) {
      // '^'
      return _tokenizeCaret(next);
    }
    if (next == 0x5B) {
      // '['
      return _tokenizeOpenSquareBracket(next);
    }
    if (next == 0x7E) {
      // '~'
      return _tokenizeTilde(next);
    }
    if (next == 0x5C) {
      // '\\'
      _appendTokenOfType(TokenType.BACKSLASH);
      return _reader.advance();
    }
    if (next == 0x23) {
      // '#'
      return _tokenizeTag(next);
    }
    if (next == 0x28) {
      // '('
      _appendBeginToken(TokenType.OPEN_PAREN);
      return _reader.advance();
    }
    if (next == 0x29) {
      // ')'
      _appendEndToken(TokenType.CLOSE_PAREN, TokenType.OPEN_PAREN);
      return _reader.advance();
    }
    if (next == 0x2C) {
      // ','
      _appendTokenOfType(TokenType.COMMA);
      return _reader.advance();
    }
    if (next == 0x3A) {
      // ':'
      _appendTokenOfType(TokenType.COLON);
      return _reader.advance();
    }
    if (next == 0x3B) {
      // ';'
      _appendTokenOfType(TokenType.SEMICOLON);
      return _reader.advance();
    }
    if (next == 0x3F) {
      // '?'
      return _tokenizeQuestion();
    }
    if (next == 0x5D) {
      // ']'
      _appendEndToken(
          TokenType.CLOSE_SQUARE_BRACKET, TokenType.OPEN_SQUARE_BRACKET);
      return _reader.advance();
    }
    if (next == 0x60) {
      // '`'
      _appendTokenOfType(TokenType.BACKPING);
      return _reader.advance();
    }
    if (next == 0x7B) {
      // '{'
      _appendBeginToken(TokenType.OPEN_CURLY_BRACKET);
      return _reader.advance();
    }
    if (next == 0x7D) {
      // '}'
      _appendEndToken(
          TokenType.CLOSE_CURLY_BRACKET, TokenType.OPEN_CURLY_BRACKET);
      return _reader.advance();
    }
    if (next == 0x2F) {
      // '/'
      return _tokenizeSlashOrComment(next);
    }
    if (next == 0x40) {
      // '@'
      _appendTokenOfType(TokenType.AT);
      return _reader.advance();
    }
    if (next == 0x22 || next == 0x27) {
      // '"' || "'"
      return _tokenizeString(next, _reader.offset, false);
    }
    if (next == 0x2E) {
      // '.'
      return _tokenizeDotOrNumber(next);
    }
    if (next == 0x30) {
      // '0'
      return _tokenizeHexOrNumber(next);
    }
    if (0x31 <= next && next <= 0x39) {
      // '1'-'9'
      return _tokenizeNumber(next);
    }
    if (next == -1) {
      // EOF
      return -1;
    }
    // Anything that falls through the dispatch above is not a valid Dart
    // character; report it and keep scanning.
    _reportError(ScannerErrorCode.ILLEGAL_CHARACTER, [next]);
    return _reader.advance();
  }
958
  /**
   * Record the fact that we are at the beginning of a new line in the source.
   */
  void recordStartOfLine() {
    _lineStarts.add(_reader.offset);
  }
965
  /**
   * Record that the source begins on the given [line] and [column] at the
   * current offset as given by the reader. Both the line and the column are
   * one-based indexes. The line starts for lines before the given line will
   * not be correct.
   *
   * This method must be invoked at most one time and must be invoked before
   * scanning begins. The values provided must be sensible. The results are
   * undefined if these conditions are violated.
   */
  void setSourceStart(int line, int column) {
    int offset = _reader.offset;
    if (line < 1 || column < 1 || offset < 0 || (line + column - 2) >= offset) {
      // The arguments are not sensible; silently ignore them.
      return;
    }
    for (int i = 2; i < line; i++) {
      // Placeholder starts for the preceding lines, whose real offsets are
      // unknown (see the doc comment above).
      _lineStarts.add(1);
    }
    _lineStarts.add(offset - column + 1);
  }
986
987 /**
988 * Scan the source code to produce a list of tokens representing the source,
989 * and return the first token in the list of tokens that were produced.
990 */
991 Token tokenize() {
992 int next = _reader.advance();
993 while (next != -1) {
994 next = bigSwitch(next);
995 }
996 _appendEofToken();
997 return firstToken;
998 }
999
  /**
   * Append a begin token of the given [type] to the token stream, attaching
   * any accumulated comments, and push it onto the grouping stack so it can
   * later be paired with its end token.
   */
  void _appendBeginToken(TokenType type) {
    BeginToken token;
    if (_firstComment == null) {
      token = new BeginToken(type, _tokenStart);
    } else {
      token = new BeginTokenWithComment(type, _tokenStart, _firstComment);
      _firstComment = null;
      _lastComment = null;
    }
    _tail = _tail.setNext(token);
    _groupingStack.add(token);
    _stackEnd++;
  }
1013
  /**
   * Append a comment token of the given [type] and [value] to the list of
   * comments to be attached to the next non-comment token. Does nothing when
   * comment preservation has been disabled.
   */
  void _appendCommentToken(TokenType type, String value) {
    // Ignore comment tokens if client specified that it doesn't need them.
    if (!_preserveComments) {
      return;
    }
    // OK, remember comment tokens.
    CommentToken token;
    if (_isDocumentationComment(value)) {
      token = new DocumentationCommentToken(type, value, _tokenStart);
    } else {
      token = new CommentToken(type, value, _tokenStart);
    }
    if (_firstComment == null) {
      _firstComment = token;
      _lastComment = _firstComment;
    } else {
      // Append to the end of the pending comment list.
      _lastComment = _lastComment.setNext(token);
    }
  }
1033
  /**
   * Append an end token of the given [type] to the token stream, attaching
   * any accumulated comments. If the top of the grouping stack is a begin
   * token of the given [beginType], pair it with this token and pop it.
   */
  void _appendEndToken(TokenType type, TokenType beginType) {
    Token token;
    if (_firstComment == null) {
      token = new Token(type, _tokenStart);
    } else {
      token = new TokenWithComment(type, _tokenStart, _firstComment);
      _firstComment = null;
      _lastComment = null;
    }
    _tail = _tail.setNext(token);
    if (_stackEnd >= 0) {
      BeginToken begin = _groupingStack[_stackEnd];
      if (begin.type == beginType) {
        begin.endToken = token;
        _groupingStack.removeAt(_stackEnd--);
      }
    }
  }
1052
  /**
   * Append the EOF token that terminates the token stream, attaching any
   * trailing comments. Records whether any begin tokens were left unmatched.
   */
  void _appendEofToken() {
    Token eofToken;
    if (_firstComment == null) {
      eofToken = new Token(TokenType.EOF, _reader.offset + 1);
    } else {
      eofToken = new TokenWithComment(
          TokenType.EOF, _reader.offset + 1, _firstComment);
      _firstComment = null;
      _lastComment = null;
    }
    // The EOF token points to itself so that there is always infinite
    // look-ahead.
    eofToken.setNext(eofToken);
    _tail = _tail.setNext(eofToken);
    if (_stackEnd >= 0) {
      // There are begin tokens that were never paired with an end token.
      _hasUnmatchedGroups = true;
      // TODO(brianwilkerson) Fix the ungrouped tokens?
    }
  }
1072
1073 void _appendKeywordToken(Keyword keyword) {
1074 if (_firstComment == null) {
1075 _tail = _tail.setNext(new KeywordToken(keyword, _tokenStart));
1076 } else {
1077 _tail = _tail.setNext(
1078 new KeywordTokenWithComment(keyword, _tokenStart, _firstComment));
1079 _firstComment = null;
1080 _lastComment = null;
1081 }
1082 }
1083
1084 void _appendStringToken(TokenType type, String value) {
1085 if (_firstComment == null) {
1086 _tail = _tail.setNext(new StringToken(type, value, _tokenStart));
1087 } else {
1088 _tail = _tail.setNext(
1089 new StringTokenWithComment(type, value, _tokenStart, _firstComment));
1090 _firstComment = null;
1091 _lastComment = null;
1092 }
1093 }
1094
1095 void _appendStringTokenWithOffset(TokenType type, String value, int offset) {
1096 if (_firstComment == null) {
1097 _tail = _tail.setNext(new StringToken(type, value, _tokenStart + offset));
1098 } else {
1099 _tail = _tail.setNext(new StringTokenWithComment(
1100 type, value, _tokenStart + offset, _firstComment));
1101 _firstComment = null;
1102 _lastComment = null;
1103 }
1104 }
1105
1106 void _appendTokenOfType(TokenType type) {
1107 if (_firstComment == null) {
1108 _tail = _tail.setNext(new Token(type, _tokenStart));
1109 } else {
1110 _tail =
1111 _tail.setNext(new TokenWithComment(type, _tokenStart, _firstComment));
1112 _firstComment = null;
1113 _lastComment = null;
1114 }
1115 }
1116
1117 void _appendTokenOfTypeWithOffset(TokenType type, int offset) {
1118 if (_firstComment == null) {
1119 _tail = _tail.setNext(new Token(type, offset));
1120 } else {
1121 _tail = _tail.setNext(new TokenWithComment(type, offset, _firstComment));
1122 _firstComment = null;
1123 _lastComment = null;
1124 }
1125 }
1126
  /**
   * Record the current reader offset as the start of the next token.
   */
  void _beginToken() {
    _tokenStart = _reader.offset;
  }
1130
  /**
   * Return the beginning token corresponding to a closing brace that was
   * found while scanning inside a string interpolation expression. Tokens
   * that cannot be matched with the closing brace will be dropped from the
   * stack (and recorded as unmatched groups).
   */
  BeginToken _findTokenMatchingClosingBraceInInterpolationExpression() {
    while (_stackEnd >= 0) {
      BeginToken begin = _groupingStack[_stackEnd];
      if (begin.type == TokenType.OPEN_CURLY_BRACKET ||
          begin.type == TokenType.STRING_INTERPOLATION_EXPRESSION) {
        return begin;
      }
      // This begin token can never be matched now; drop it.
      _hasUnmatchedGroups = true;
      _groupingStack.removeAt(_stackEnd--);
    }
    //
    // We should never get to this point because we wouldn't be inside a
    // string interpolation expression unless we had previously found the
    // start of the expression.
    //
    return null;
  }
1153
  /**
   * Report an error at the current offset. The [errorCode] is the error code
   * indicating the nature of the error. The [arguments] are any arguments
   * needed to complete the error message.
   */
  void _reportError(ScannerErrorCode errorCode, [List<Object> arguments]) {
    _errorListener.onError(
        new AnalysisError(source, _reader.offset, 1, errorCode, arguments));
  }
1163
1164 int _select(int choice, TokenType yesType, TokenType noType) {
1165 int next = _reader.advance();
1166 if (next == choice) {
1167 _appendTokenOfType(yesType);
1168 return _reader.advance();
1169 } else {
1170 _appendTokenOfType(noType);
1171 return next;
1172 }
1173 }
1174
1175 int _selectWithOffset(
1176 int choice, TokenType yesType, TokenType noType, int offset) {
1177 int next = _reader.advance();
1178 if (next == choice) {
1179 _appendTokenOfTypeWithOffset(yesType, offset);
1180 return _reader.advance();
1181 } else {
1182 _appendTokenOfTypeWithOffset(noType, offset);
1183 return next;
1184 }
1185 }
1186
1187 int _tokenizeAmpersand(int next) {
1188 // && &= &
1189 next = _reader.advance();
1190 if (next == 0x26) {
1191 _appendTokenOfType(TokenType.AMPERSAND_AMPERSAND);
1192 return _reader.advance();
1193 } else if (next == 0x3D) {
1194 _appendTokenOfType(TokenType.AMPERSAND_EQ);
1195 return _reader.advance();
1196 } else {
1197 _appendTokenOfType(TokenType.AMPERSAND);
1198 return next;
1199 }
1200 }
1201
1202 int _tokenizeBar(int next) {
1203 // | || |=
1204 next = _reader.advance();
1205 if (next == 0x7C) {
1206 _appendTokenOfType(TokenType.BAR_BAR);
1207 return _reader.advance();
1208 } else if (next == 0x3D) {
1209 _appendTokenOfType(TokenType.BAR_EQ);
1210 return _reader.advance();
1211 } else {
1212 _appendTokenOfType(TokenType.BAR);
1213 return next;
1214 }
1215 }
1216
  /**
   * Scan either '^=' or '^'.
   */
  int _tokenizeCaret(int next) =>
      _select(0x3D, TokenType.CARET_EQ, TokenType.CARET);
1219
1220 int _tokenizeDotOrNumber(int next) {
1221 int start = _reader.offset;
1222 next = _reader.advance();
1223 if (0x30 <= next && next <= 0x39) {
1224 return _tokenizeFractionPart(next, start);
1225 } else if (0x2E == next) {
1226 return _select(
1227 0x2E, TokenType.PERIOD_PERIOD_PERIOD, TokenType.PERIOD_PERIOD);
1228 } else {
1229 _appendTokenOfType(TokenType.PERIOD);
1230 return next;
1231 }
1232 }
1233
1234 int _tokenizeEquals(int next) {
1235 // = == =>
1236 next = _reader.advance();
1237 if (next == 0x3D) {
1238 _appendTokenOfType(TokenType.EQ_EQ);
1239 return _reader.advance();
1240 } else if (next == 0x3E) {
1241 _appendTokenOfType(TokenType.FUNCTION);
1242 return _reader.advance();
1243 }
1244 _appendTokenOfType(TokenType.EQ);
1245 return next;
1246 }
1247
1248 int _tokenizeExclamation(int next) {
1249 // ! !=
1250 next = _reader.advance();
1251 if (next == 0x3D) {
1252 _appendTokenOfType(TokenType.BANG_EQ);
1253 return _reader.advance();
1254 }
1255 _appendTokenOfType(TokenType.BANG);
1256 return next;
1257 }
1258
  /**
   * Scan the exponent of a numeric literal; the 'e'/'E' has already been
   * consumed and [next] is the character after it. An optional sign may
   * precede the digits; MISSING_DIGIT is reported if no digits are present.
   * Returns the first unconsumed character.
   */
  int _tokenizeExponent(int next) {
    if (next == 0x2B || next == 0x2D) { // '+' or '-'
      next = _reader.advance();
    }
    bool hasDigits = false;
    while (true) {
      if (0x30 <= next && next <= 0x39) { // '0'-'9'
        hasDigits = true;
      } else {
        if (!hasDigits) {
          _reportError(ScannerErrorCode.MISSING_DIGIT);
        }
        return next;
      }
      next = _reader.advance();
    }
  }
1276
  /**
   * Scan the fraction and/or exponent of a number that began at [start];
   * [next] is the first character after the already-consumed '.'. If no digit
   * follows the '.', the integer part is emitted as an INT token and the '.'
   * is re-tokenized on its own; otherwise a DOUBLE token is emitted. Returns
   * the first unconsumed character.
   */
  int _tokenizeFractionPart(int next, int start) {
    bool done = false;
    bool hasDigit = false;
    LOOP: while (!done) {
      if (0x30 <= next && next <= 0x39) { // '0'-'9'
        hasDigit = true;
      } else if (0x65 == next || 0x45 == next) { // 'e' or 'E'
        hasDigit = true;
        next = _tokenizeExponent(_reader.advance());
        done = true;
        continue LOOP;
      } else {
        done = true;
        continue LOOP;
      }
      next = _reader.advance();
    }
    if (!hasDigit) {
      // The '.' did not begin a fraction after all; emit the integer part
      // and handle the '.' (or '..'/'...') separately.
      _appendStringToken(TokenType.INT, _reader.getString(start, -2));
      if (0x2E == next) { // '.'
        return _selectWithOffset(0x2E, TokenType.PERIOD_PERIOD_PERIOD,
            TokenType.PERIOD_PERIOD, _reader.offset - 1);
      }
      _appendTokenOfTypeWithOffset(TokenType.PERIOD, _reader.offset - 1);
      return bigSwitch(next);
    }
    _appendStringToken(
        TokenType.DOUBLE, _reader.getString(start, next < 0 ? 0 : -1));
    return next;
  }
1307
1308 int _tokenizeGreaterThan(int next) {
1309 // > >= >> >>=
1310 next = _reader.advance();
1311 if (0x3D == next) {
1312 _appendTokenOfType(TokenType.GT_EQ);
1313 return _reader.advance();
1314 } else if (0x3E == next) {
1315 next = _reader.advance();
1316 if (0x3D == next) {
1317 _appendTokenOfType(TokenType.GT_GT_EQ);
1318 return _reader.advance();
1319 } else {
1320 _appendTokenOfType(TokenType.GT_GT);
1321 return next;
1322 }
1323 } else {
1324 _appendTokenOfType(TokenType.GT);
1325 return next;
1326 }
1327 }
1328
  /**
   * Scan the digits of a hexadecimal literal; the 'x'/'X' of the prefix has
   * just been consumed. Reports MISSING_HEX_DIGIT if no hex digit follows,
   * emits a HEXADECIMAL token, and returns the first unconsumed character.
   */
  int _tokenizeHex(int next) {
    int start = _reader.offset - 1;
    bool hasDigits = false;
    while (true) {
      next = _reader.advance();
      if ((0x30 <= next && next <= 0x39) || // '0'-'9'
          (0x41 <= next && next <= 0x46) || // 'A'-'F'
          (0x61 <= next && next <= 0x66)) { // 'a'-'f'
        hasDigits = true;
      } else {
        if (!hasDigits) {
          _reportError(ScannerErrorCode.MISSING_HEX_DIGIT);
        }
        _appendStringToken(
            TokenType.HEXADECIMAL, _reader.getString(start, next < 0 ? 0 : -1));
        return next;
      }
    }
  }
1348
1349 int _tokenizeHexOrNumber(int next) {
1350 int x = _reader.peek();
1351 if (x == 0x78 || x == 0x58) {
1352 _reader.advance();
1353 return _tokenizeHex(x);
1354 }
1355 return _tokenizeNumber(next);
1356 }
1357
  /**
   * Scan the remainder of an identifier that began at [start]; [next] is the
   * current character. '$' is accepted only when [allowDollar] is true. Emits
   * an IDENTIFIER token and returns the first unconsumed character.
   */
  int _tokenizeIdentifier(int next, int start, bool allowDollar) {
    while ((0x61 <= next && next <= 0x7A) || // 'a'-'z'
        (0x41 <= next && next <= 0x5A) || // 'A'-'Z'
        (0x30 <= next && next <= 0x39) || // '0'-'9'
        next == 0x5F || // '_'
        (next == 0x24 && allowDollar)) { // '$'
      next = _reader.advance();
    }
    _appendStringToken(
        TokenType.IDENTIFIER, _reader.getString(start, next < 0 ? 0 : -1));
    return next;
  }
1370
  /**
   * Scan the tokens of a '${' interpolation expression inside a string that
   * began at [start]. Consumes tokens until the matching '}' (or end of
   * input) is found and returns the first unconsumed character.
   */
  int _tokenizeInterpolatedExpression(int next, int start) {
    _appendBeginToken(TokenType.STRING_INTERPOLATION_EXPRESSION);
    next = _reader.advance();
    while (next != -1) {
      if (next == 0x7D) { // '}'
        BeginToken begin =
            _findTokenMatchingClosingBraceInInterpolationExpression();
        if (begin == null) {
          // No matching opener on the stack; emit the brace and return to
          // string scanning.
          _beginToken();
          _appendTokenOfType(TokenType.CLOSE_CURLY_BRACKET);
          next = _reader.advance();
          _beginToken();
          return next;
        } else if (begin.type == TokenType.OPEN_CURLY_BRACKET) {
          // Closes a '{' nested inside the expression; keep scanning.
          _beginToken();
          _appendEndToken(
              TokenType.CLOSE_CURLY_BRACKET, TokenType.OPEN_CURLY_BRACKET);
          next = _reader.advance();
          _beginToken();
        } else if (begin.type == TokenType.STRING_INTERPOLATION_EXPRESSION) {
          // Closes the interpolation expression itself.
          _beginToken();
          _appendEndToken(TokenType.CLOSE_CURLY_BRACKET,
              TokenType.STRING_INTERPOLATION_EXPRESSION);
          next = _reader.advance();
          _beginToken();
          return next;
        }
      } else {
        next = bigSwitch(next);
      }
    }
    return next;
  }
1404
  /**
   * Scan a simple '$name' interpolation; [next] is the character after the
   * already-consumed '$'. Emits the STRING_INTERPOLATION_IDENTIFIER token for
   * the '$' and then scans the identifier if one follows.
   */
  int _tokenizeInterpolatedIdentifier(int next, int start) {
    _appendStringTokenWithOffset(
        TokenType.STRING_INTERPOLATION_IDENTIFIER, "\$", 0);
    if ((0x41 <= next && next <= 0x5A) || // 'A'-'Z'
        (0x61 <= next && next <= 0x7A) || // 'a'-'z'
        next == 0x5F) { // '_'
      _beginToken();
      next = _tokenizeKeywordOrIdentifier(next, false);
    }
    _beginToken();
    return next;
  }
1417
  /**
   * Scan a token that is either a keyword or an identifier. Drives the
   * [KeywordState] machine over lowercase letters; if it ends in an accepting
   * state and the following character cannot extend an identifier, a keyword
   * token is emitted, otherwise scanning falls back to [_tokenizeIdentifier].
   */
  int _tokenizeKeywordOrIdentifier(int next, bool allowDollar) {
    KeywordState state = KeywordState.KEYWORD_STATE;
    int start = _reader.offset;
    while (state != null && 0x61 <= next && next <= 0x7A) { // 'a'-'z'
      state = state.next(next);
      next = _reader.advance();
    }
    if (state == null || state.keyword() == null) {
      // The text seen so far is not a complete keyword.
      return _tokenizeIdentifier(next, start, allowDollar);
    }
    if ((0x41 <= next && next <= 0x5A) || // 'A'-'Z'
        (0x30 <= next && next <= 0x39) || // '0'-'9'
        next == 0x5F || // '_'
        next == 0x24) { // '$'
      // The keyword text continues as part of a longer identifier.
      return _tokenizeIdentifier(next, start, allowDollar);
    } else if (next < 128) {
      _appendKeywordToken(state.keyword());
      return next;
    } else {
      // A non-ASCII character follows; treat the run as an identifier.
      return _tokenizeIdentifier(next, start, allowDollar);
    }
  }
1440
1441 int _tokenizeLessThan(int next) {
1442 // < <= << <<=
1443 next = _reader.advance();
1444 if (0x3D == next) {
1445 _appendTokenOfType(TokenType.LT_EQ);
1446 return _reader.advance();
1447 } else if (0x3C == next) {
1448 return _select(0x3D, TokenType.LT_LT_EQ, TokenType.LT_LT);
1449 } else {
1450 _appendTokenOfType(TokenType.LT);
1451 return next;
1452 }
1453 }
1454
1455 int _tokenizeMinus(int next) {
1456 // - -- -=
1457 next = _reader.advance();
1458 if (next == 0x2D) {
1459 _appendTokenOfType(TokenType.MINUS_MINUS);
1460 return _reader.advance();
1461 } else if (next == 0x3D) {
1462 _appendTokenOfType(TokenType.MINUS_EQ);
1463 return _reader.advance();
1464 } else {
1465 _appendTokenOfType(TokenType.MINUS);
1466 return next;
1467 }
1468 }
1469
  /**
   * Scan a multi-line comment, honoring nested comment delimiters. Newlines
   * inside the comment are recorded as line starts; an
   * UNTERMINATED_MULTI_LINE_COMMENT error is reported if end of input is
   * reached first. Returns the first unconsumed character.
   */
  int _tokenizeMultiLineComment(int next) {
    int nesting = 1;
    next = _reader.advance();
    while (true) {
      if (-1 == next) {
        // End of input inside the comment.
        _reportError(ScannerErrorCode.UNTERMINATED_MULTI_LINE_COMMENT);
        _appendCommentToken(
            TokenType.MULTI_LINE_COMMENT, _reader.getString(_tokenStart, 0));
        return next;
      } else if (0x2A == next) { // '*'
        next = _reader.advance();
        if (0x2F == next) { // '/': one level of nesting closes
          --nesting;
          if (0 == nesting) {
            _appendCommentToken(TokenType.MULTI_LINE_COMMENT,
                _reader.getString(_tokenStart, 0));
            return _reader.advance();
          } else {
            next = _reader.advance();
          }
        }
      } else if (0x2F == next) { // '/'
        next = _reader.advance();
        if (0x2A == next) { // '*': a nested comment opens
          next = _reader.advance();
          ++nesting;
        }
      } else if (next == 0xD) { // CR, possibly followed by LF
        next = _reader.advance();
        if (next == 0xA) {
          next = _reader.advance();
        }
        recordStartOfLine();
      } else if (next == 0xA) { // LF
        recordStartOfLine();
        next = _reader.advance();
      } else {
        next = _reader.advance();
      }
    }
  }
1511
  /**
   * Scan the body of a raw multi-line string that began at [start]; the
   * opening triple quote (three occurrences of [quoteChar]) has already been
   * consumed. No escapes or interpolations are recognized. Reports
   * UNTERMINATED_STRING_LITERAL at end of input.
   */
  int _tokenizeMultiLineRawString(int quoteChar, int start) {
    int next = _reader.advance();
    outer: while (next != -1) {
      while (next != quoteChar) {
        if (next == -1) {
          break outer;
        } else if (next == 0xD) { // CR, possibly followed by LF
          next = _reader.advance();
          if (next == 0xA) {
            next = _reader.advance();
          }
          recordStartOfLine();
        } else if (next == 0xA) { // LF
          next = _reader.advance();
          recordStartOfLine();
        } else {
          next = _reader.advance();
        }
      }
      // One quote seen; two more are needed to terminate the string.
      next = _reader.advance();
      if (next == quoteChar) {
        next = _reader.advance();
        if (next == quoteChar) {
          _appendStringToken(TokenType.STRING, _reader.getString(start, 0));
          return _reader.advance();
        }
      }
    }
    _reportError(ScannerErrorCode.UNTERMINATED_STRING_LITERAL);
    _appendStringToken(TokenType.STRING, _reader.getString(start, 0));
    return _reader.advance();
  }
1544
  /**
   * Scan the body of a multi-line string that began at [start]. Delegates to
   * [_tokenizeMultiLineRawString] when [raw] is true; otherwise handles '$'
   * interpolations, backslash escapes and CR/LF line tracking. Reports
   * UNTERMINATED_STRING_LITERAL at end of input.
   */
  int _tokenizeMultiLineString(int quoteChar, int start, bool raw) {
    if (raw) {
      return _tokenizeMultiLineRawString(quoteChar, start);
    }
    int next = _reader.advance();
    while (next != -1) {
      if (next == 0x24) { // '$': start of an interpolation
        _appendStringToken(TokenType.STRING, _reader.getString(start, -1));
        next = _tokenizeStringInterpolation(start);
        _beginToken();
        start = _reader.offset;
        continue;
      }
      if (next == quoteChar) {
        // Three consecutive quotes terminate the string.
        next = _reader.advance();
        if (next == quoteChar) {
          next = _reader.advance();
          if (next == quoteChar) {
            _appendStringToken(TokenType.STRING, _reader.getString(start, 0));
            return _reader.advance();
          }
        }
        continue;
      }
      if (next == 0x5C) { // '\': skip the escaped character
        next = _reader.advance();
        if (next == -1) {
          break;
        }
        if (next == 0xD) { // CR, possibly followed by LF
          next = _reader.advance();
          if (next == 0xA) {
            next = _reader.advance();
          }
          recordStartOfLine();
        } else if (next == 0xA) { // LF
          recordStartOfLine();
          next = _reader.advance();
        } else {
          next = _reader.advance();
        }
      } else if (next == 0xD) { // CR, possibly followed by LF
        next = _reader.advance();
        if (next == 0xA) {
          next = _reader.advance();
        }
        recordStartOfLine();
      } else if (next == 0xA) { // LF
        recordStartOfLine();
        next = _reader.advance();
      } else {
        next = _reader.advance();
      }
    }
    _reportError(ScannerErrorCode.UNTERMINATED_STRING_LITERAL);
    if (start == _reader.offset) {
      // Nothing consumed since the last interpolation; synthesize an empty
      // string token.
      _appendStringTokenWithOffset(TokenType.STRING, "", 1);
    } else {
      _appendStringToken(TokenType.STRING, _reader.getString(start, 0));
    }
    return _reader.advance();
  }
1607
  /**
   * Scan either '*=' or '*'.
   */
  int _tokenizeMultiply(int next) =>
      _select(0x3D, TokenType.STAR_EQ, TokenType.STAR);
1610
  /**
   * Scan a decimal numeric literal whose first digit is the current
   * character. Emits an INT token, or delegates to [_tokenizeFractionPart]
   * when a '.', 'e' or 'E' is encountered. Returns the first unconsumed
   * character.
   */
  int _tokenizeNumber(int next) {
    int start = _reader.offset;
    while (true) {
      next = _reader.advance();
      if (0x30 <= next && next <= 0x39) { // '0'-'9'
        continue;
      } else if (next == 0x2E) { // '.'
        return _tokenizeFractionPart(_reader.advance(), start);
      } else if (next == 0x65 || next == 0x45) { // 'e' or 'E'
        return _tokenizeFractionPart(next, start);
      } else {
        _appendStringToken(
            TokenType.INT, _reader.getString(start, next < 0 ? 0 : -1));
        return next;
      }
    }
  }
1628
1629 int _tokenizeOpenSquareBracket(int next) {
1630 // [ [] []=
1631 next = _reader.advance();
1632 if (next == 0x5D) {
1633 return _select(0x3D, TokenType.INDEX_EQ, TokenType.INDEX);
1634 } else {
1635 _appendBeginToken(TokenType.OPEN_SQUARE_BRACKET);
1636 return next;
1637 }
1638 }
1639
  /**
   * Scan either '%=' or '%'.
   */
  int _tokenizePercent(int next) =>
      _select(0x3D, TokenType.PERCENT_EQ, TokenType.PERCENT);
1642
1643 int _tokenizePlus(int next) {
1644 // + ++ +=
1645 next = _reader.advance();
1646 if (0x2B == next) {
1647 _appendTokenOfType(TokenType.PLUS_PLUS);
1648 return _reader.advance();
1649 } else if (0x3D == next) {
1650 _appendTokenOfType(TokenType.PLUS_EQ);
1651 return _reader.advance();
1652 } else {
1653 _appendTokenOfType(TokenType.PLUS);
1654 return next;
1655 }
1656 }
1657
  /**
   * Scan one of '?', '?.', '??' or '??='.
   */
  int _tokenizeQuestion() {
    // ? ?. ?? ??=
    int next = _reader.advance();
    if (next == 0x2E) {
      // '.'
      _appendTokenOfType(TokenType.QUESTION_PERIOD);
      return _reader.advance();
    } else if (next == 0x3F) {
      // '?'
      next = _reader.advance();
      if (next == 0x3D) {
        // '='
        _appendTokenOfType(TokenType.QUESTION_QUESTION_EQ);
        return _reader.advance();
      } else {
        _appendTokenOfType(TokenType.QUESTION_QUESTION);
        return next;
      }
    } else {
      _appendTokenOfType(TokenType.QUESTION);
      return next;
    }
  }
1681
  /**
   * Scan a single-line comment: everything up to (but not including) the next
   * newline or end of input. Returns the first unconsumed character.
   */
  int _tokenizeSingleLineComment(int next) {
    while (true) {
      next = _reader.advance();
      if (-1 == next) {
        _appendCommentToken(
            TokenType.SINGLE_LINE_COMMENT, _reader.getString(_tokenStart, 0));
        return next;
      } else if (0xA == next || 0xD == next) { // LF or CR ends the comment
        _appendCommentToken(
            TokenType.SINGLE_LINE_COMMENT, _reader.getString(_tokenStart, -1));
        return next;
      }
    }
  }
1696
  /**
   * Scan the body of a raw single-line string delimited by [quoteChar] that
   * began at [start]. No escapes or interpolations are recognized; a newline
   * or end of input triggers UNTERMINATED_STRING_LITERAL.
   */
  int _tokenizeSingleLineRawString(int next, int quoteChar, int start) {
    next = _reader.advance();
    while (next != -1) {
      if (next == quoteChar) {
        _appendStringToken(TokenType.STRING, _reader.getString(start, 0));
        return _reader.advance();
      } else if (next == 0xD || next == 0xA) { // CR or LF: unterminated
        _reportError(ScannerErrorCode.UNTERMINATED_STRING_LITERAL);
        _appendStringToken(TokenType.STRING, _reader.getString(start, -1));
        return _reader.advance();
      }
      next = _reader.advance();
    }
    _reportError(ScannerErrorCode.UNTERMINATED_STRING_LITERAL);
    _appendStringToken(TokenType.STRING, _reader.getString(start, 0));
    return _reader.advance();
  }
1714
  /**
   * Scan the body of a single-line string delimited by [quoteChar] that began
   * at [start], handling backslash escapes and '$' interpolations. A newline
   * or end of input triggers UNTERMINATED_STRING_LITERAL.
   */
  int _tokenizeSingleLineString(int next, int quoteChar, int start) {
    while (next != quoteChar) {
      if (next == 0x5C) { // '\': skip the escaped character
        next = _reader.advance();
      } else if (next == 0x24) { // '$': start of an interpolation
        _appendStringToken(TokenType.STRING, _reader.getString(start, -1));
        next = _tokenizeStringInterpolation(start);
        _beginToken();
        start = _reader.offset;
        continue;
      }
      if (next <= 0xD && (next == 0xA || next == 0xD || next == -1)) {
        // Unterminated: newline or end of input inside the string.
        _reportError(ScannerErrorCode.UNTERMINATED_STRING_LITERAL);
        if (start == _reader.offset) {
          _appendStringTokenWithOffset(TokenType.STRING, "", 1);
        } else if (next == -1) {
          _appendStringToken(TokenType.STRING, _reader.getString(start, 0));
        } else {
          _appendStringToken(TokenType.STRING, _reader.getString(start, -1));
        }
        return _reader.advance();
      }
      next = _reader.advance();
    }
    _appendStringToken(TokenType.STRING, _reader.getString(start, 0));
    return _reader.advance();
  }
1742
1743 int _tokenizeSlashOrComment(int next) {
1744 next = _reader.advance();
1745 if (0x2A == next) {
1746 return _tokenizeMultiLineComment(next);
1747 } else if (0x2F == next) {
1748 return _tokenizeSingleLineComment(next);
1749 } else if (0x3D == next) {
1750 _appendTokenOfType(TokenType.SLASH_EQ);
1751 return _reader.advance();
1752 } else {
1753 _appendTokenOfType(TokenType.SLASH);
1754 return next;
1755 }
1756 }
1757
  /**
   * Scan a string literal whose opening quote is [next], starting at [start].
   * Distinguishes empty strings (two quotes), multi-line strings (three
   * quotes) and ordinary single-line strings; [raw] indicates an 'r' prefix.
   */
  int _tokenizeString(int next, int start, bool raw) {
    int quoteChar = next;
    next = _reader.advance();
    if (quoteChar == next) {
      next = _reader.advance();
      if (quoteChar == next) {
        // Multiline string.
        return _tokenizeMultiLineString(quoteChar, start, raw);
      } else {
        // Empty string.
        _appendStringToken(TokenType.STRING, _reader.getString(start, -1));
        return next;
      }
    }
    if (raw) {
      return _tokenizeSingleLineRawString(next, quoteChar, start);
    } else {
      return _tokenizeSingleLineString(next, quoteChar, start);
    }
  }
1778
1779 int _tokenizeStringInterpolation(int start) {
1780 _beginToken();
1781 int next = _reader.advance();
1782 if (next == 0x7B) {
1783 return _tokenizeInterpolatedExpression(next, start);
1784 } else {
1785 return _tokenizeInterpolatedIdentifier(next, start);
1786 }
1787 }
1788
  /**
   * Scan either a '#!' script tag (recognized only at offset 0 and consuming
   * the rest of the line) or a plain '#' HASH token.
   */
  int _tokenizeTag(int next) {
    // # or #!.*[\n\r]
    if (_reader.offset == 0) {
      if (_reader.peek() == 0x21) { // '!'
        do {
          next = _reader.advance();
        } while (next != 0xA && next != 0xD && next > 0);
        _appendStringToken(
            TokenType.SCRIPT_TAG, _reader.getString(_tokenStart, 0));
        return next;
      }
    }
    _appendTokenOfType(TokenType.HASH);
    return _reader.advance();
  }
1804
1805 int _tokenizeTilde(int next) {
1806 // ~ ~/ ~/=
1807 next = _reader.advance();
1808 if (next == 0x2F) {
1809 return _select(0x3D, TokenType.TILDE_SLASH_EQ, TokenType.TILDE_SLASH);
1810 } else {
1811 _appendTokenOfType(TokenType.TILDE);
1812 return next;
1813 }
1814 }
1815
  /**
   * Return `true` if [value] starts with a documentation-comment prefix:
   * either three slashes or a slash followed by two stars.
   */
  static bool _isDocumentationComment(String value) {
    return StringUtilities.startsWith3(value, 0, 0x2F, 0x2F, 0x2F) ||
        StringUtilities.startsWith3(value, 0, 0x2F, 0x2A, 0x2A);
  }
1823 }
1824
/**
 * The error codes used for errors detected by the scanner.
 */
class ScannerErrorCode extends ErrorCode {
  static const ScannerErrorCode ILLEGAL_CHARACTER =
      const ScannerErrorCode('ILLEGAL_CHARACTER', "Illegal character {0}");

  static const ScannerErrorCode MISSING_DIGIT =
      const ScannerErrorCode('MISSING_DIGIT', "Decimal digit expected");

  // Note: message previously misspelled "Hexidecimal".
  static const ScannerErrorCode MISSING_HEX_DIGIT =
      const ScannerErrorCode('MISSING_HEX_DIGIT', "Hexadecimal digit expected");

  static const ScannerErrorCode MISSING_QUOTE =
      const ScannerErrorCode('MISSING_QUOTE', "Expected quote (' or \")");

  static const ScannerErrorCode UNABLE_GET_CONTENT = const ScannerErrorCode(
      'UNABLE_GET_CONTENT', "Unable to get content: {0}");

  static const ScannerErrorCode UNTERMINATED_MULTI_LINE_COMMENT =
      const ScannerErrorCode(
          'UNTERMINATED_MULTI_LINE_COMMENT', "Unterminated multi-line comment");

  static const ScannerErrorCode UNTERMINATED_STRING_LITERAL =
      const ScannerErrorCode(
          'UNTERMINATED_STRING_LITERAL', "Unterminated string literal");

  /**
   * Initialize a newly created error code to have the given [name]. The message
   * associated with the error will be created from the given [message]
   * template. The correction associated with the error will be created from the
   * given [correction] template.
   */
  const ScannerErrorCode(String name, String message, [String correction])
      : super(name, message, correction);

  @override
  ErrorSeverity get errorSeverity => ErrorSeverity.ERROR;

  @override
  ErrorType get type => ErrorType.SYNTACTIC_ERROR;
}
1867
/**
 * A token whose value is independent of its type.
 */
class StringToken extends Token {
  /**
   * The lexeme represented by this token.
   */
  String _value;

  /**
   * Initialize a newly created token to represent a token of the given [type]
   * with the given [value] at the given [offset]. The value is interned to
   * reduce memory usage.
   */
  StringToken(TokenType type, String value, int offset) : super(type, offset) {
    this._value = StringUtilities.intern(value);
  }

  @override
  String get lexeme => _value;

  @override
  Token copy() => new StringToken(type, _value, offset);

  @override
  String value() => _value;
}
1894
/**
 * A string token that is preceded by comments.
 */
class StringTokenWithComment extends StringToken {
  /**
   * The first comment in the list of comments that precede this token.
   */
  CommentToken _precedingComment;

  /**
   * Initialize a newly created token to have the given [type] at the given
   * [offset] and to be preceded by the comments reachable from the given
   * [comment].
   */
  StringTokenWithComment(
      TokenType type, String value, int offset, this._precedingComment)
      : super(type, value, offset) {
    _setCommentParent(_precedingComment);
  }

  /**
   * The first comment preceding this token, or `null` if there is none.
   */
  CommentToken get precedingComments => _precedingComment;

  /**
   * Set the comments preceding this token, re-parenting them to this token.
   */
  void set precedingComments(CommentToken comment) {
    _precedingComment = comment;
    _setCommentParent(_precedingComment);
  }

  @override
  void applyDelta(int delta) {
    // Shift the preceding comment tokens along with this token.
    super.applyDelta(delta);
    Token token = precedingComments;
    while (token != null) {
      token.applyDelta(delta);
      token = token.next;
    }
  }

  @override
  Token copy() => new StringTokenWithComment(
      type, lexeme, offset, copyComments(precedingComments));
}
1936
/**
 * A [CharacterReader] that reads characters from a character sequence, but adds
 * a delta when reporting the current character offset so that the character
 * sequence can be a subsequence from a larger sequence.
 */
class SubSequenceReader extends CharSequenceReader {
  /**
   * The offset from the beginning of the file to the beginning of the source
   * being scanned.
   */
  final int _offsetDelta;

  /**
   * Initialize a newly created reader to read the characters in the given
   * [sequence]. The [_offsetDelta] is the offset from the beginning of the file
   * to the beginning of the source being scanned.
   */
  SubSequenceReader(String sequence, this._offsetDelta) : super(sequence);

  @override
  int get offset => _offsetDelta + super.offset;

  @override
  void set offset(int offset) {
    // Translate the file-relative offset back to a sequence-relative one.
    super.offset = offset - _offsetDelta;
  }

  @override
  String getString(int start, int endDelta) =>
      super.getString(start - _offsetDelta, endDelta);
}
1968
/**
 * A string token that is always synthetic (see [Token.isSynthetic]). Its
 * value is independent of its type.
 */
class SyntheticStringToken extends StringToken {
  /**
   * Initialize a newly created token to represent a token of the given [type]
   * with the given [value] at the given [offset].
   */
  SyntheticStringToken(TokenType type, String value, int offset)
      : super(type, value, offset);

  @override
  bool get isSynthetic => true;
}
1983
/**
 * A token that was scanned from the input. Each token knows which tokens
 * precede and follow it, acting as a link in a doubly linked list of tokens.
 */
class Token {
  /**
   * The type of the token.
   */
  final TokenType type;

  /**
   * The offset from the beginning of the file to the first character in the
   * token.
   */
  int offset = 0;

  /**
   * The previous token in the token stream.
   */
  Token previous;

  /**
   * The next token in the token stream.
   */
  Token _next;

  /**
   * Initialize a newly created token to have the given [type] and [offset].
   */
  Token(this.type, int offset) {
    this.offset = offset;
  }

  /**
   * Return the offset from the beginning of the file to the character after the
   * last character of the token.
   */
  int get end => offset + length;

  /**
   * Return `true` if this token represents an operator.
   */
  bool get isOperator => type.isOperator;

  /**
   * Return `true` if this token is a synthetic token. A synthetic token is a
   * token that was introduced by the parser in order to recover from an error
   * in the code.
   */
  bool get isSynthetic => length == 0;

  /**
   * Return `true` if this token represents an operator that can be defined by
   * users.
   */
  bool get isUserDefinableOperator => type.isUserDefinableOperator;

  /**
   * Return the number of characters in the node's source range.
   */
  int get length => lexeme.length;

  /**
   * Return the lexeme that represents this token.
   */
  String get lexeme => type.lexeme;

  /**
   * Return the next token in the token stream.
   */
  Token get next => _next;

  /**
   * Return the first comment in the list of comments that precede this token,
   * or `null` if there are no comments preceding this token. Additional
   * comments can be reached by following the token stream using [next] until
   * `null` is returned.
   *
   * For example, if the original contents were "/* one */ /* two */ id", then
   * the first preceding comment token will have a lexeme of "/* one */" and
   * the next comment token will have a lexeme of "/* two */".
   */
  CommentToken get precedingComments => null;

  /**
   * Apply (add) the given [delta] to this token's offset.
   */
  void applyDelta(int delta) {
    offset += delta;
  }

  /**
   * Return a newly created token that is a copy of this token but that is not a
   * part of any token stream.
   */
  Token copy() => new Token(type, offset);

  /**
   * Copy a linked list of comment tokens identical to the given comment tokens.
   * Return the head of the copied list, or `null` if [token] is `null`.
   */
  Token copyComments(Token token) {
    if (token == null) {
      return null;
    }
    Token head = token.copy();
    Token tail = head;
    token = token.next;
    while (token != null) {
      tail = tail.setNext(token.copy());
      token = token.next;
    }
    return head;
  }

  /**
   * Return `true` if this token has any one of the given [types].
   */
  bool matchesAny(List<TokenType> types) {
    for (TokenType type in types) {
      if (this.type == type) {
        return true;
      }
    }
    return false;
  }

  /**
   * Set the next token in the token stream to the given [token]. This has the
   * side-effect of setting this token to be the previous token for the given
   * token. Return the token that was passed in.
   */
  Token setNext(Token token) {
    _next = token;
    token.previous = this;
    return token;
  }

  /**
   * Set the next token in the token stream to the given token without changing
   * which token is the previous token for the given token. Return the token
   * that was passed in.
   */
  Token setNextWithoutSettingPrevious(Token token) {
    _next = token;
    return token;
  }

  @override
  String toString() => lexeme;

  /**
   * Return the value of this token. For keyword tokens, this is the keyword
   * associated with the token, for other tokens it is the lexeme associated
   * with the token.
   */
  Object value() => type.lexeme;

  /**
   * Sets the `parent` property to `this` for the given [comment] and all the
   * next tokens.
   */
  void _setCommentParent(CommentToken comment) {
    while (comment != null) {
      comment.parent = this;
      comment = comment.next;
    }
  }

  /**
   * Compare the given [tokens] to find the token that appears first in the
   * source being parsed. That is, return the left-most of all of the tokens.
   * The list must be non-`null`, but the elements of the list are allowed to be
   * `null`. Return the token with the smallest offset, or `null` if the list is
   * empty or if all of the elements of the list are `null`.
   */
  static Token lexicallyFirst(List<Token> tokens) {
    Token first = null;
    int offset = -1; // -1 means no candidate token has been seen yet.
    for (Token token in tokens) {
      if (token != null && (offset < 0 || token.offset < offset)) {
        first = token;
        offset = token.offset;
      }
    }
    return first;
  }
}
2171
/**
 * The classes (or groups) of tokens with a similar use.
 *
 * The operator classes are listed in ascending order of precedence.
 */
class TokenClass {
  /**
   * Used for token types that do not belong to any operator class.
   */
  static const TokenClass NO_CLASS = const TokenClass('NO_CLASS');

  /**
   * The class of assignment operators (precedence 1).
   */
  static const TokenClass ASSIGNMENT_OPERATOR =
      const TokenClass('ASSIGNMENT_OPERATOR', 1);

  /**
   * The class of the cascade operator (precedence 2).
   */
  static const TokenClass CASCADE_OPERATOR =
      const TokenClass('CASCADE_OPERATOR', 2);

  /**
   * The class of the conditional operator (precedence 3).
   */
  static const TokenClass CONDITIONAL_OPERATOR =
      const TokenClass('CONDITIONAL_OPERATOR', 3);

  /**
   * The class of the if-null operator (precedence 4).
   */
  static const TokenClass IF_NULL_OPERATOR =
      const TokenClass('IF_NULL_OPERATOR', 4);

  /**
   * The class of logical-or operators (precedence 5).
   */
  static const TokenClass LOGICAL_OR_OPERATOR =
      const TokenClass('LOGICAL_OR_OPERATOR', 5);

  /**
   * The class of logical-and operators (precedence 6).
   */
  static const TokenClass LOGICAL_AND_OPERATOR =
      const TokenClass('LOGICAL_AND_OPERATOR', 6);

  /**
   * The class of equality operators (precedence 7).
   */
  static const TokenClass EQUALITY_OPERATOR =
      const TokenClass('EQUALITY_OPERATOR', 7);

  /**
   * The class of relational operators (precedence 8).
   */
  static const TokenClass RELATIONAL_OPERATOR =
      const TokenClass('RELATIONAL_OPERATOR', 8);

  /**
   * The class of bitwise-or operators (precedence 9).
   */
  static const TokenClass BITWISE_OR_OPERATOR =
      const TokenClass('BITWISE_OR_OPERATOR', 9);

  /**
   * The class of bitwise-xor operators (precedence 10).
   */
  static const TokenClass BITWISE_XOR_OPERATOR =
      const TokenClass('BITWISE_XOR_OPERATOR', 10);

  /**
   * The class of bitwise-and operators (precedence 11).
   */
  static const TokenClass BITWISE_AND_OPERATOR =
      const TokenClass('BITWISE_AND_OPERATOR', 11);

  /**
   * The class of shift operators (precedence 12).
   */
  static const TokenClass SHIFT_OPERATOR =
      const TokenClass('SHIFT_OPERATOR', 12);

  /**
   * The class of additive operators (precedence 13).
   */
  static const TokenClass ADDITIVE_OPERATOR =
      const TokenClass('ADDITIVE_OPERATOR', 13);

  /**
   * The class of multiplicative operators (precedence 14).
   */
  static const TokenClass MULTIPLICATIVE_OPERATOR =
      const TokenClass('MULTIPLICATIVE_OPERATOR', 14);

  /**
   * The class of unary prefix operators (precedence 15).
   */
  static const TokenClass UNARY_PREFIX_OPERATOR =
      const TokenClass('UNARY_PREFIX_OPERATOR', 15);

  /**
   * The class of unary postfix operators (precedence 16).
   */
  static const TokenClass UNARY_POSTFIX_OPERATOR =
      const TokenClass('UNARY_POSTFIX_OPERATOR', 16);

  /**
   * The name of the token class.
   */
  final String name;

  /**
   * The precedence of tokens in this class, or `0` when such tokens do not
   * represent an operator.
   */
  final int precedence;

  const TokenClass(this.name, [this.precedence = 0]);

  @override
  String toString() => name;
}
2294
/**
 * The types of tokens that can be returned by the scanner.
 */
class TokenType {
  /**
   * The type of the token that marks the end of the input.
   */
  static const TokenType EOF = const TokenType_EOF('EOF');

  static const TokenType DOUBLE = const TokenType('DOUBLE');

  static const TokenType HEXADECIMAL = const TokenType('HEXADECIMAL');

  static const TokenType IDENTIFIER = const TokenType('IDENTIFIER');

  static const TokenType INT = const TokenType('INT');

  static const TokenType KEYWORD = const TokenType('KEYWORD');

  static const TokenType MULTI_LINE_COMMENT =
      const TokenType('MULTI_LINE_COMMENT');

  static const TokenType SCRIPT_TAG = const TokenType('SCRIPT_TAG');

  static const TokenType SINGLE_LINE_COMMENT =
      const TokenType('SINGLE_LINE_COMMENT');

  static const TokenType STRING = const TokenType('STRING');

  static const TokenType AMPERSAND =
      const TokenType('AMPERSAND', TokenClass.BITWISE_AND_OPERATOR, "&");

  static const TokenType AMPERSAND_AMPERSAND = const TokenType(
      'AMPERSAND_AMPERSAND', TokenClass.LOGICAL_AND_OPERATOR, "&&");

  static const TokenType AMPERSAND_EQ =
      const TokenType('AMPERSAND_EQ', TokenClass.ASSIGNMENT_OPERATOR, "&=");

  static const TokenType AT = const TokenType('AT', TokenClass.NO_CLASS, "@");

  static const TokenType BANG =
      const TokenType('BANG', TokenClass.UNARY_PREFIX_OPERATOR, "!");

  static const TokenType BANG_EQ =
      const TokenType('BANG_EQ', TokenClass.EQUALITY_OPERATOR, "!=");

  static const TokenType BAR =
      const TokenType('BAR', TokenClass.BITWISE_OR_OPERATOR, "|");

  static const TokenType BAR_BAR =
      const TokenType('BAR_BAR', TokenClass.LOGICAL_OR_OPERATOR, "||");

  static const TokenType BAR_EQ =
      const TokenType('BAR_EQ', TokenClass.ASSIGNMENT_OPERATOR, "|=");

  static const TokenType COLON =
      const TokenType('COLON', TokenClass.NO_CLASS, ":");

  static const TokenType COMMA =
      const TokenType('COMMA', TokenClass.NO_CLASS, ",");

  static const TokenType CARET =
      const TokenType('CARET', TokenClass.BITWISE_XOR_OPERATOR, "^");

  static const TokenType CARET_EQ =
      const TokenType('CARET_EQ', TokenClass.ASSIGNMENT_OPERATOR, "^=");

  static const TokenType CLOSE_CURLY_BRACKET =
      const TokenType('CLOSE_CURLY_BRACKET', TokenClass.NO_CLASS, "}");

  static const TokenType CLOSE_PAREN =
      const TokenType('CLOSE_PAREN', TokenClass.NO_CLASS, ")");

  static const TokenType CLOSE_SQUARE_BRACKET =
      const TokenType('CLOSE_SQUARE_BRACKET', TokenClass.NO_CLASS, "]");

  static const TokenType EQ =
      const TokenType('EQ', TokenClass.ASSIGNMENT_OPERATOR, "=");

  static const TokenType EQ_EQ =
      const TokenType('EQ_EQ', TokenClass.EQUALITY_OPERATOR, "==");

  static const TokenType FUNCTION =
      const TokenType('FUNCTION', TokenClass.NO_CLASS, "=>");

  static const TokenType GT =
      const TokenType('GT', TokenClass.RELATIONAL_OPERATOR, ">");

  static const TokenType GT_EQ =
      const TokenType('GT_EQ', TokenClass.RELATIONAL_OPERATOR, ">=");

  static const TokenType GT_GT =
      const TokenType('GT_GT', TokenClass.SHIFT_OPERATOR, ">>");

  static const TokenType GT_GT_EQ =
      const TokenType('GT_GT_EQ', TokenClass.ASSIGNMENT_OPERATOR, ">>=");

  static const TokenType HASH =
      const TokenType('HASH', TokenClass.NO_CLASS, "#");

  static const TokenType INDEX =
      const TokenType('INDEX', TokenClass.UNARY_POSTFIX_OPERATOR, "[]");

  static const TokenType INDEX_EQ =
      const TokenType('INDEX_EQ', TokenClass.UNARY_POSTFIX_OPERATOR, "[]=");

  static const TokenType IS =
      const TokenType('IS', TokenClass.RELATIONAL_OPERATOR, "is");

  static const TokenType LT =
      const TokenType('LT', TokenClass.RELATIONAL_OPERATOR, "<");

  static const TokenType LT_EQ =
      const TokenType('LT_EQ', TokenClass.RELATIONAL_OPERATOR, "<=");

  static const TokenType LT_LT =
      const TokenType('LT_LT', TokenClass.SHIFT_OPERATOR, "<<");

  static const TokenType LT_LT_EQ =
      const TokenType('LT_LT_EQ', TokenClass.ASSIGNMENT_OPERATOR, "<<=");

  static const TokenType MINUS =
      const TokenType('MINUS', TokenClass.ADDITIVE_OPERATOR, "-");

  static const TokenType MINUS_EQ =
      const TokenType('MINUS_EQ', TokenClass.ASSIGNMENT_OPERATOR, "-=");

  static const TokenType MINUS_MINUS =
      const TokenType('MINUS_MINUS', TokenClass.UNARY_PREFIX_OPERATOR, "--");

  static const TokenType OPEN_CURLY_BRACKET =
      const TokenType('OPEN_CURLY_BRACKET', TokenClass.NO_CLASS, "{");

  static const TokenType OPEN_PAREN =
      const TokenType('OPEN_PAREN', TokenClass.UNARY_POSTFIX_OPERATOR, "(");

  static const TokenType OPEN_SQUARE_BRACKET = const TokenType(
      'OPEN_SQUARE_BRACKET', TokenClass.UNARY_POSTFIX_OPERATOR, "[");

  static const TokenType PERCENT =
      const TokenType('PERCENT', TokenClass.MULTIPLICATIVE_OPERATOR, "%");

  static const TokenType PERCENT_EQ =
      const TokenType('PERCENT_EQ', TokenClass.ASSIGNMENT_OPERATOR, "%=");

  static const TokenType PERIOD =
      const TokenType('PERIOD', TokenClass.UNARY_POSTFIX_OPERATOR, ".");

  static const TokenType PERIOD_PERIOD =
      const TokenType('PERIOD_PERIOD', TokenClass.CASCADE_OPERATOR, "..");

  static const TokenType PLUS =
      const TokenType('PLUS', TokenClass.ADDITIVE_OPERATOR, "+");

  static const TokenType PLUS_EQ =
      const TokenType('PLUS_EQ', TokenClass.ASSIGNMENT_OPERATOR, "+=");

  static const TokenType PLUS_PLUS =
      const TokenType('PLUS_PLUS', TokenClass.UNARY_PREFIX_OPERATOR, "++");

  static const TokenType QUESTION =
      const TokenType('QUESTION', TokenClass.CONDITIONAL_OPERATOR, "?");

  static const TokenType QUESTION_PERIOD = const TokenType(
      'QUESTION_PERIOD', TokenClass.UNARY_POSTFIX_OPERATOR, '?.');

  static const TokenType QUESTION_QUESTION =
      const TokenType('QUESTION_QUESTION', TokenClass.IF_NULL_OPERATOR, '??');

  static const TokenType QUESTION_QUESTION_EQ = const TokenType(
      'QUESTION_QUESTION_EQ', TokenClass.ASSIGNMENT_OPERATOR, '??=');

  static const TokenType SEMICOLON =
      const TokenType('SEMICOLON', TokenClass.NO_CLASS, ";");

  static const TokenType SLASH =
      const TokenType('SLASH', TokenClass.MULTIPLICATIVE_OPERATOR, "/");

  static const TokenType SLASH_EQ =
      const TokenType('SLASH_EQ', TokenClass.ASSIGNMENT_OPERATOR, "/=");

  static const TokenType STAR =
      const TokenType('STAR', TokenClass.MULTIPLICATIVE_OPERATOR, "*");

  static const TokenType STAR_EQ =
      const TokenType('STAR_EQ', TokenClass.ASSIGNMENT_OPERATOR, "*=");

  static const TokenType STRING_INTERPOLATION_EXPRESSION = const TokenType(
      'STRING_INTERPOLATION_EXPRESSION', TokenClass.NO_CLASS, "\${");

  static const TokenType STRING_INTERPOLATION_IDENTIFIER = const TokenType(
      'STRING_INTERPOLATION_IDENTIFIER', TokenClass.NO_CLASS, "\$");

  static const TokenType TILDE =
      const TokenType('TILDE', TokenClass.UNARY_PREFIX_OPERATOR, "~");

  static const TokenType TILDE_SLASH =
      const TokenType('TILDE_SLASH', TokenClass.MULTIPLICATIVE_OPERATOR, "~/");

  static const TokenType TILDE_SLASH_EQ =
      const TokenType('TILDE_SLASH_EQ', TokenClass.ASSIGNMENT_OPERATOR, "~/=");

  static const TokenType BACKPING =
      const TokenType('BACKPING', TokenClass.NO_CLASS, "`");

  static const TokenType BACKSLASH =
      const TokenType('BACKSLASH', TokenClass.NO_CLASS, "\\");

  static const TokenType PERIOD_PERIOD_PERIOD =
      const TokenType('PERIOD_PERIOD_PERIOD', TokenClass.NO_CLASS, "...");

  /**
   * The class of the token.
   */
  final TokenClass _tokenClass;

  /**
   * The name of the token type.
   */
  final String name;

  /**
   * The lexeme that defines this type of token, or `null` if there is more than
   * one possible lexeme for this type of token.
   */
  final String lexeme;

  const TokenType(this.name,
      [this._tokenClass = TokenClass.NO_CLASS, this.lexeme = null]);

  /**
   * Return `true` if this type of token represents an additive operator.
   */
  bool get isAdditiveOperator => _tokenClass == TokenClass.ADDITIVE_OPERATOR;

  /**
   * Return `true` if this type of token represents an assignment operator.
   */
  bool get isAssignmentOperator =>
      _tokenClass == TokenClass.ASSIGNMENT_OPERATOR;

  /**
   * Return `true` if this type of token represents an associative operator. An
   * associative operator is an operator for which the following equality is
   * true: `(a * b) * c == a * (b * c)`. In other words, if the result of
   * applying the operator to multiple operands does not depend on the order in
   * which those applications occur.
   *
   * Note: This method considers the logical-and and logical-or operators to be
   * associative, even though the order in which the application of those
   * operators can have an effect because evaluation of the right-hand operand
   * is conditional.
   */
  bool get isAssociativeOperator => this == AMPERSAND ||
      this == AMPERSAND_AMPERSAND ||
      this == BAR ||
      this == BAR_BAR ||
      this == CARET ||
      this == PLUS ||
      this == STAR;

  /**
   * Return `true` if this type of token represents an equality operator.
   */
  bool get isEqualityOperator => _tokenClass == TokenClass.EQUALITY_OPERATOR;

  /**
   * Return `true` if this type of token represents an increment operator.
   *
   * Note: string comparisons below use `==` rather than `identical`; the
   * previous use of `identical` only worked because const string literals are
   * canonicalized, and would silently misbehave for any runtime-built lexeme.
   */
  bool get isIncrementOperator => lexeme == "++" || lexeme == "--";

  /**
   * Return `true` if this type of token represents a multiplicative operator.
   */
  bool get isMultiplicativeOperator =>
      _tokenClass == TokenClass.MULTIPLICATIVE_OPERATOR;

  /**
   * Return `true` if this token type represents an operator.
   */
  bool get isOperator => _tokenClass != TokenClass.NO_CLASS &&
      this != OPEN_PAREN &&
      this != OPEN_SQUARE_BRACKET &&
      this != PERIOD;

  /**
   * Return `true` if this type of token represents a relational operator.
   */
  bool get isRelationalOperator =>
      _tokenClass == TokenClass.RELATIONAL_OPERATOR;

  /**
   * Return `true` if this type of token represents a shift operator.
   */
  bool get isShiftOperator => _tokenClass == TokenClass.SHIFT_OPERATOR;

  /**
   * Return `true` if this type of token represents a unary postfix operator.
   */
  bool get isUnaryPostfixOperator =>
      _tokenClass == TokenClass.UNARY_POSTFIX_OPERATOR;

  /**
   * Return `true` if this type of token represents a unary prefix operator.
   */
  bool get isUnaryPrefixOperator =>
      _tokenClass == TokenClass.UNARY_PREFIX_OPERATOR;

  /**
   * Return `true` if this token type represents an operator that can be defined
   * by users.
   */
  bool get isUserDefinableOperator => lexeme == "==" ||
      lexeme == "~" ||
      lexeme == "[]" ||
      lexeme == "[]=" ||
      lexeme == "*" ||
      lexeme == "/" ||
      lexeme == "%" ||
      lexeme == "~/" ||
      lexeme == "+" ||
      lexeme == "-" ||
      lexeme == "<<" ||
      lexeme == ">>" ||
      lexeme == ">=" ||
      lexeme == ">" ||
      lexeme == "<=" ||
      lexeme == "<" ||
      lexeme == "&" ||
      lexeme == "^" ||
      lexeme == "|";

  /**
   * Return the precedence of the token, or `0` if the token does not represent
   * an operator.
   */
  int get precedence => _tokenClass.precedence;

  @override
  String toString() => name;
}
2637
/**
 * The token type used exclusively for the end-of-input marker.
 *
 * Its lexeme is the empty string, and it prints as `-eof-` for readability.
 */
class TokenType_EOF extends TokenType {
  const TokenType_EOF(String name) : super(name, TokenClass.NO_CLASS, "");

  @override
  String toString() => "-eof-";
}
2644
/**
 * A normal token that is preceded by comments.
 */
class TokenWithComment extends Token {
  /**
   * The head of the linked list of comment tokens that precede this token.
   */
  CommentToken _precedingComment;

  /**
   * Initialize a newly created token to have the given [type] at the given
   * [offset] and to be preceded by the comments reachable from the given
   * [comment].
   */
  TokenWithComment(TokenType type, int offset, CommentToken comment)
      : _precedingComment = comment,
        super(type, offset) {
    // Attach the comment chain to this token so comments know their parent.
    _setCommentParent(_precedingComment);
  }

  CommentToken get precedingComments => _precedingComment;

  void set precedingComments(CommentToken comment) {
    _precedingComment = comment;
    _setCommentParent(comment);
  }

  @override
  Token copy() => new TokenWithComment(type, offset, precedingComments);
}
OLDNEW
« no previous file with comments | « analyzer/lib/src/generated/resolver.dart ('k') | analyzer/lib/src/generated/sdk.dart » ('j') | no next file with comments »

Powered by Google App Engine
This is Rietveld 408576698