Chromium Code Reviews
chromiumcodereview-hr@appspot.gserviceaccount.com (chromiumcodereview-hr) | Please choose your nickname with Settings | Help | Chromium Project | Gerrit Changes | Sign out
(927)

Side by Side Diff: observatory_pub_packages/analyzer/src/generated/scanner.dart

Issue 816693004: Add observatory_pub_packages snapshot to third_party (Closed) Base URL: http://dart.googlecode.com/svn/third_party/
Patch Set: Created 6 years ago
Use n/p to move between diff chunks; N/P to move between comments. Draft comments are only viewable by you.
Jump to:
View unified diff | Download patch | Annotate | Revision Log
OLDNEW
(Empty)
1 // Copyright (c) 2014, the Dart project authors. Please see the AUTHORS file
2 // for details. All rights reserved. Use of this source code is governed by a
3 // BSD-style license that can be found in the LICENSE file.
4
5 // This code was auto-generated, is not intended to be edited, and is subject to
6 // significant change. Please see the README file for more information.
7
8 library engine.scanner;
9
10 import 'dart:collection';
11 import 'java_core.dart';
12 import 'java_engine.dart';
13 import 'source.dart';
14 import 'error.dart';
15 import 'instrumentation.dart';
16 import 'utilities_collection.dart' show TokenMap;
17
/**
 * Instances of the class `BeginToken` represent the opening half of a grouping
 * pair of tokens: curly brackets ('{'), parentheses ('('), square brackets
 * ('['), and string interpolation expressions.
 */
class BeginToken extends Token {
  /**
   * The token that closes the group opened by this token.
   */
  Token endToken;

  /**
   * Initialize a newly created token representing the opening half of a
   * grouping pair of tokens.
   *
   * @param type the type of the token
   * @param offset the offset from the beginning of the file to the first
   *          character in the token
   */
  BeginToken(TokenType type, int offset) : super(type, offset) {
    // Only grouping token types are valid for a BeginToken.
    assert(type == TokenType.OPEN_CURLY_BRACKET ||
        type == TokenType.OPEN_PAREN ||
        type == TokenType.OPEN_SQUARE_BRACKET ||
        type == TokenType.STRING_INTERPOLATION_EXPRESSION);
  }

  @override
  Token copy() => new BeginToken(type, offset);
}
41
/**
 * Instances of the class `BeginTokenWithComment` represent a begin token that
 * is preceded by comments.
 */
class BeginTokenWithComment extends BeginToken {
  /**
   * The first comment in the list of comments that precede this token.
   */
  final Token _precedingComment;

  /**
   * Initialize a newly created token to have the given type and offset and to
   * be preceded by the comments reachable from the given comment.
   *
   * @param type the type of the token
   * @param offset the offset from the beginning of the file to the first
   *          character in the token
   * @param precedingComment the first comment in the list of comments that
   *          precede this token
   */
  BeginTokenWithComment(TokenType type, int offset, this._precedingComment)
      : super(type, offset);

  @override
  Token get precedingComments => _precedingComment;

  @override
  Token copy() =>
      new BeginTokenWithComment(type, offset, copyComments(_precedingComment));

  @override
  void applyDelta(int delta) {
    super.applyDelta(delta);
    // The preceding comments are not reachable through the normal token
    // stream, so their offsets must be shifted explicitly.
    for (Token comment = _precedingComment;
        comment != null;
        comment = comment.next) {
      comment.applyDelta(delta);
    }
  }
}
78
/**
 * Instances of the class `CharSequenceReader` implement a [CharacterReader]
 * that reads characters from a character sequence.
 */
class CharSequenceReader implements CharacterReader {
  /**
   * The sequence from which characters will be read.
   */
  final String _sequence;

  /**
   * The number of characters in the string, cached to avoid repeated lookups.
   */
  int _stringLength = 0;

  /**
   * The index, relative to the string, of the last character that was read,
   * or `-1` if no character has been read yet.
   */
  int _charOffset = 0;

  /**
   * Initialize a newly created reader to read the characters in the given
   * sequence.
   *
   * @param sequence the sequence from which characters will be read
   */
  CharSequenceReader(this._sequence) {
    this._stringLength = _sequence.length;
    this._charOffset = -1;
  }

  @override
  int get offset => _charOffset;

  @override
  void set offset(int offset) {
    _charOffset = offset;
  }

  @override
  int advance() {
    if (_charOffset + 1 >= _stringLength) {
      // -1 signals the end of the input.
      return -1;
    }
    return _sequence.codeUnitAt(++_charOffset);
  }

  @override
  String getString(int start, int endDelta) =>
      // Note: substring already returns a String, so no conversion is needed
      // (the original code appended a redundant toString() call).
      _sequence.substring(start, _charOffset + 1 + endDelta);

  @override
  int peek() {
    // Use the cached length, for consistency with advance().
    if (_charOffset + 1 >= _stringLength) {
      return -1;
    }
    return _sequence.codeUnitAt(_charOffset + 1);
  }
}
136
/**
 * The interface `CharacterReader` defines the behavior of objects that provide
 * the characters being scanned, one code unit at a time.
 */
abstract class CharacterReader {
  /**
   * Advance the current position and return the character at the new current
   * position.
   *
   * @return the character at the new current position, or `-1` if the end of
   *          the input has been reached
   */
  int advance();

  /**
   * Return the current offset relative to the beginning of the source. Return
   * the initial offset if the scanner has not yet scanned the source code, and
   * one (1) past the end of the source code if the entire source code has been
   * scanned.
   *
   * @return the current offset of the scanner in the source
   */
  int get offset;

  /**
   * Return the substring of the source code between the start offset and the
   * modified current position. The current position is modified by adding the
   * end delta.
   *
   * @param start the offset to the beginning of the string, relative to the
   *          start of the file
   * @param endDelta the number of characters after the current location to be
   *          included in the string, or the number of characters before the
   *          current location to be excluded if the offset is negative
   * @return the specified substring of the source code
   */
  String getString(int start, int endDelta);

  /**
   * Return the character at the current position without changing the current
   * position.
   *
   * @return the character at the current position, or `-1` if the end of the
   *          input has been reached
   */
  int peek();

  /**
   * Set the current offset relative to the beginning of the source. The new
   * offset must be between the initial offset and one (1) past the end of the
   * source code.
   *
   * @param offset the new offset in the source
   */
  void set offset(int offset);
}
184
/**
 * Instances of the class `IncrementalScanner` implement a scanner that scans a
 * subset of a string and inserts the resulting tokens into the middle of an
 * existing token stream.
 */
class IncrementalScanner extends Scanner {
  /**
   * The reader used to access the characters in the source.
   */
  CharacterReader _reader;

  /**
   * A map from tokens that were copied to the copies of the tokens.
   */
  TokenMap _tokenMap = new TokenMap();

  /**
   * The token in the new token stream immediately to the left of the range of
   * tokens that were inserted, or the token immediately to the left of the
   * modified region if there were no new tokens.
   */
  Token _leftToken;

  /**
   * The token in the new token stream immediately to the right of the range of
   * tokens that were inserted, or the token immediately to the right of the
   * modified region if there were no new tokens.
   */
  Token _rightToken;

  /**
   * A flag indicating whether there were any tokens changed as a result of the
   * modification.
   */
  bool _hasNonWhitespaceChange = false;

  /**
   * Initialize a newly created scanner.
   *
   * @param source the source being scanned
   * @param reader the character reader used to read the characters in the
   *          source
   * @param errorListener the error listener that will be informed of any
   *          errors that are found
   */
  IncrementalScanner(Source source, CharacterReader reader,
      AnalysisErrorListener errorListener)
      : super(source, reader, errorListener) {
    this._reader = reader;
  }

  /**
   * Return `true` if there were any tokens either added or removed (or both)
   * as a result of the modification.
   *
   * @return `true` if there were any tokens changed as a result of the
   *          modification
   */
  bool get hasNonWhitespaceChange => _hasNonWhitespaceChange;

  /**
   * Return the token in the new token stream immediately to the left of the
   * range of tokens that were inserted, or the token immediately to the left
   * of the modified region if there were no new tokens.
   *
   * @return the token to the left of the inserted tokens
   */
  Token get leftToken => _leftToken;

  /**
   * Return the token in the new token stream immediately to the right of the
   * range of tokens that were inserted, or the token immediately to the right
   * of the modified region if there were no new tokens.
   *
   * @return the token to the right of the inserted tokens
   */
  Token get rightToken => _rightToken;

  /**
   * Return a map from tokens that were copied to the copies of the tokens.
   *
   * @return a map from tokens that were copied to the copies of the tokens
   */
  TokenMap get tokenMap => _tokenMap;

  /**
   * Given the stream of tokens scanned from the original source, the modified
   * source (the result of replacing one contiguous range of characters with
   * another string of characters), and a specification of the modification
   * that was made, return a stream of tokens scanned from the modified source.
   * The original stream of tokens will not be modified.
   *
   * @param originalStream the stream of tokens scanned from the original
   *          source
   * @param index the index of the first character in both the original and
   *          modified source that was affected by the modification
   * @param removedLength the number of characters removed from the original
   *          source
   * @param insertedLength the number of characters added to the modified
   *          source
   */
  Token rescan(Token originalStream, int index, int removedLength,
      int insertedLength) {
    //
    // Copy all of the tokens in the originalStream whose end is less than the
    // replacement start. (If the replacement start is equal to the end of an
    // existing token, then it means that the existing token might have been
    // modified, so we need to rescan it.)
    //
    while (originalStream.type != TokenType.EOF &&
        originalStream.end < index) {
      originalStream = _copyAndAdvance(originalStream, 0);
    }
    Token oldFirst = originalStream;
    Token oldLeftToken = originalStream.previous;
    _leftToken = tail;
    //
    // Skip tokens in the original stream until we find a token whose offset is
    // greater than the end of the removed region. (If the end of the removed
    // region is equal to the beginning of an existing token, then it means
    // that the existing token might have been modified, so we need to rescan
    // it.)
    //
    int removedEnd = index + (removedLength == 0 ? 0 : removedLength - 1);
    while (originalStream.type != TokenType.EOF &&
        originalStream.offset <= removedEnd) {
      originalStream = originalStream.next;
    }
    Token oldLast;
    Token oldRightToken;
    if (originalStream.type != TokenType.EOF &&
        removedEnd + 1 == originalStream.offset) {
      oldLast = originalStream;
      originalStream = originalStream.next;
      oldRightToken = originalStream;
    } else {
      oldLast = originalStream.previous;
      oldRightToken = originalStream;
    }
    //
    // Compute the delta between the character index of characters after the
    // modified region in the original source and the index of the
    // corresponding character in the modified source.
    //
    int delta = insertedLength - removedLength;
    //
    // Compute the range of characters that are known to need to be rescanned.
    // If the index is within an existing token, then we need to start at the
    // beginning of the token.
    //
    int scanStart = Math.min(oldFirst.offset, index);
    int oldEnd = oldLast.end + delta - 1;
    int newEnd = index + insertedLength - 1;
    int scanEnd = Math.max(newEnd, oldEnd);
    //
    // Starting at the start of the scan region, scan tokens from the
    // modifiedSource until the end of the just scanned token is greater than
    // or equal to end of the scan region in the modified source. Include
    // trailing characters of any token that was split as a result of inserted
    // text, as in "ab" --> "a.b".
    //
    _reader.offset = scanStart - 1;
    int next = _reader.advance();
    while (next != -1 && _reader.offset <= scanEnd) {
      next = bigSwitch(next);
    }
    //
    // Copy the remaining tokens in the original stream, but apply the delta to
    // the token's offset.
    //
    if (originalStream.type == TokenType.EOF) {
      _copyAndAdvance(originalStream, delta);
      _rightToken = tail;
      // The EOF token's next pointer refers to itself.
      _rightToken.setNextWithoutSettingPrevious(_rightToken);
    } else {
      originalStream = _copyAndAdvance(originalStream, delta);
      _rightToken = tail;
      while (originalStream.type != TokenType.EOF) {
        originalStream = _copyAndAdvance(originalStream, delta);
      }
      Token eof = _copyAndAdvance(originalStream, delta);
      eof.setNextWithoutSettingPrevious(eof);
    }
    //
    // If the index is immediately after an existing token and the inserted
    // characters did not change that original token, then adjust the leftToken
    // to be the next token. For example, in "a; c;" --> "a;b c;", the
    // leftToken was ";", but this code advances it to "b" since "b" is the
    // first new token.
    //
    Token newFirst = _leftToken.next;
    while (!identical(newFirst, _rightToken) &&
        !identical(oldFirst, oldRightToken) &&
        newFirst.type != TokenType.EOF &&
        _equalTokens(oldFirst, newFirst)) {
      _tokenMap.put(oldFirst, newFirst);
      oldLeftToken = oldFirst;
      oldFirst = oldFirst.next;
      _leftToken = newFirst;
      newFirst = newFirst.next;
    }
    Token newLast = _rightToken.previous;
    while (!identical(newLast, _leftToken) &&
        !identical(oldLast, oldLeftToken) &&
        newLast.type != TokenType.EOF &&
        _equalTokens(oldLast, newLast)) {
      _tokenMap.put(oldLast, newLast);
      oldRightToken = oldLast;
      oldLast = oldLast.previous;
      _rightToken = newLast;
      newLast = newLast.previous;
    }
    _hasNonWhitespaceChange = !identical(_leftToken.next, _rightToken) ||
        !identical(oldLeftToken.next, oldRightToken);
    //
    // TODO(brianwilkerson) Begin tokens are not getting associated with the
    //     corresponding end tokens (because the end tokens have not been
    //     copied when we're copying the begin tokens). This could have
    //     implications for parsing.
    // TODO(brianwilkerson) Update the lineInfo.
    //
    return firstToken;
  }

  /**
   * Copy the given token (appending the copy to the new token stream), record
   * the mapping from the original token and its preceding comments to their
   * copies, and return the token following the original token.
   *
   * @param originalToken the token to be copied
   * @param delta the amount by which the copy's offset is to be shifted
   * @return the token following the original token
   */
  Token _copyAndAdvance(Token originalToken, int delta) {
    Token copiedToken = originalToken.copy();
    _tokenMap.put(originalToken, copiedToken);
    copiedToken.applyDelta(delta);
    appendToken(copiedToken);
    // Walk the original token's comment list in lock-step with the copied
    // token's comment list (the copies were created by copy() above) so that
    // each original comment is mapped to its copy. The previous code read
    // both lists from originalToken, incorrectly mapping each comment to
    // itself instead of to its copy.
    Token originalComment = originalToken.precedingComments;
    Token copiedComment = copiedToken.precedingComments;
    while (originalComment != null) {
      _tokenMap.put(originalComment, copiedComment);
      originalComment = originalComment.next;
      copiedComment = copiedComment.next;
    }
    return originalToken.next;
  }

  /**
   * Return `true` if the two tokens are equal to each other. For the purposes
   * of the incremental scanner, two tokens are equal if they have the same
   * type and lexeme.
   *
   * @param oldToken the token from the old stream that is being compared
   * @param newToken the token from the new stream that is being compared
   * @return `true` if the two tokens are equal to each other
   */
  bool _equalTokens(Token oldToken, Token newToken) =>
      oldToken.type == newToken.type &&
      oldToken.length == newToken.length &&
      oldToken.lexeme == newToken.lexeme;
}
404
/**
 * The enumeration `Keyword` defines the keywords in the Dart programming
 * language. The first group of constants are reserved words; the second group
 * (built with [Keyword.con2]) are pseudo-keywords, which can also be used as
 * identifiers.
 */
class Keyword extends Enum<Keyword> {
  static const Keyword ASSERT = const Keyword.con1('ASSERT', 0, "assert");

  static const Keyword BREAK = const Keyword.con1('BREAK', 1, "break");

  static const Keyword CASE = const Keyword.con1('CASE', 2, "case");

  static const Keyword CATCH = const Keyword.con1('CATCH', 3, "catch");

  static const Keyword CLASS = const Keyword.con1('CLASS', 4, "class");

  static const Keyword CONST = const Keyword.con1('CONST', 5, "const");

  static const Keyword CONTINUE = const Keyword.con1('CONTINUE', 6, "continue");

  static const Keyword DEFAULT = const Keyword.con1('DEFAULT', 7, "default");

  static const Keyword DO = const Keyword.con1('DO', 8, "do");

  static const Keyword ELSE = const Keyword.con1('ELSE', 9, "else");

  static const Keyword ENUM = const Keyword.con1('ENUM', 10, "enum");

  static const Keyword EXTENDS = const Keyword.con1('EXTENDS', 11, "extends");

  static const Keyword FALSE = const Keyword.con1('FALSE', 12, "false");

  static const Keyword FINAL = const Keyword.con1('FINAL', 13, "final");

  static const Keyword FINALLY = const Keyword.con1('FINALLY', 14, "finally");

  static const Keyword FOR = const Keyword.con1('FOR', 15, "for");

  static const Keyword IF = const Keyword.con1('IF', 16, "if");

  static const Keyword IN = const Keyword.con1('IN', 17, "in");

  static const Keyword IS = const Keyword.con1('IS', 18, "is");

  static const Keyword NEW = const Keyword.con1('NEW', 19, "new");

  static const Keyword NULL = const Keyword.con1('NULL', 20, "null");

  static const Keyword RETHROW = const Keyword.con1('RETHROW', 21, "rethrow");

  static const Keyword RETURN = const Keyword.con1('RETURN', 22, "return");

  static const Keyword SUPER = const Keyword.con1('SUPER', 23, "super");

  static const Keyword SWITCH = const Keyword.con1('SWITCH', 24, "switch");

  static const Keyword THIS = const Keyword.con1('THIS', 25, "this");

  static const Keyword THROW = const Keyword.con1('THROW', 26, "throw");

  static const Keyword TRUE = const Keyword.con1('TRUE', 27, "true");

  static const Keyword TRY = const Keyword.con1('TRY', 28, "try");

  static const Keyword VAR = const Keyword.con1('VAR', 29, "var");

  static const Keyword VOID = const Keyword.con1('VOID', 30, "void");

  static const Keyword WHILE = const Keyword.con1('WHILE', 31, "while");

  static const Keyword WITH = const Keyword.con1('WITH', 32, "with");

  // Pseudo-keywords (may also be used as identifiers) follow.

  static const Keyword ABSTRACT = const Keyword.con2('ABSTRACT', 33, "abstract", true);

  static const Keyword AS = const Keyword.con2('AS', 34, "as", true);

  static const Keyword DEFERRED = const Keyword.con2('DEFERRED', 35, "deferred", true);

  static const Keyword DYNAMIC = const Keyword.con2('DYNAMIC', 36, "dynamic", true);

  static const Keyword EXPORT = const Keyword.con2('EXPORT', 37, "export", true);

  static const Keyword EXTERNAL = const Keyword.con2('EXTERNAL', 38, "external", true);

  static const Keyword FACTORY = const Keyword.con2('FACTORY', 39, "factory", true);

  static const Keyword GET = const Keyword.con2('GET', 40, "get", true);

  static const Keyword IMPLEMENTS = const Keyword.con2('IMPLEMENTS', 41, "implements", true);

  static const Keyword IMPORT = const Keyword.con2('IMPORT', 42, "import", true);

  static const Keyword LIBRARY = const Keyword.con2('LIBRARY', 43, "library", true);

  static const Keyword OPERATOR = const Keyword.con2('OPERATOR', 44, "operator", true);

  static const Keyword PART = const Keyword.con2('PART', 45, "part", true);

  static const Keyword SET = const Keyword.con2('SET', 46, "set", true);

  static const Keyword STATIC = const Keyword.con2('STATIC', 47, "static", true);

  static const Keyword TYPEDEF = const Keyword.con2('TYPEDEF', 48, "typedef", true);

  /**
   * A list of all of the keyword constants, in ordinal order. The position of
   * each constant in this list must match its declared ordinal.
   */
  static const List<Keyword> values = const [
      ASSERT,
      BREAK,
      CASE,
      CATCH,
      CLASS,
      CONST,
      CONTINUE,
      DEFAULT,
      DO,
      ELSE,
      ENUM,
      EXTENDS,
      FALSE,
      FINAL,
      FINALLY,
      FOR,
      IF,
      IN,
      IS,
      NEW,
      NULL,
      RETHROW,
      RETURN,
      SUPER,
      SWITCH,
      THIS,
      THROW,
      TRUE,
      TRY,
      VAR,
      VOID,
      WHILE,
      WITH,
      ABSTRACT,
      AS,
      DEFERRED,
      DYNAMIC,
      EXPORT,
      EXTERNAL,
      FACTORY,
      GET,
      IMPLEMENTS,
      IMPORT,
      LIBRARY,
      OPERATOR,
      PART,
      SET,
      STATIC,
      TYPEDEF];

  /**
   * The lexeme for the keyword.
   */
  final String syntax;

  /**
   * A flag indicating whether the keyword is a pseudo-keyword. Pseudo keywords
   * can be used as identifiers.
   */
  final bool isPseudoKeyword;

  /**
   * A table mapping the lexemes of keywords to the corresponding keyword.
   */
  static Map<String, Keyword> keywords = _createKeywordMap();

  /**
   * Create a table mapping the lexemes of keywords to the corresponding
   * keyword.
   *
   * @return the table that was created
   */
  static Map<String, Keyword> _createKeywordMap() {
    LinkedHashMap<String, Keyword> result = new LinkedHashMap<String, Keyword>();
    for (Keyword keyword in values) {
      result[keyword.syntax] = keyword;
    }
    return result;
  }

  /**
   * Initialize a newly created keyword to have the given syntax. The keyword
   * is not a pseudo-keyword.
   *
   * @param syntax the lexeme for the keyword
   */
  const Keyword.con1(String name, int ordinal, String syntax)
      : this.con2(name, ordinal, syntax, false);

  /**
   * Initialize a newly created keyword to have the given syntax. The keyword
   * is a pseudo-keyword if the given flag is `true`.
   *
   * @param syntax the lexeme for the keyword
   * @param isPseudoKeyword `true` if this keyword is a pseudo-keyword
   */
  const Keyword.con2(String name, int ordinal, this.syntax, this.isPseudoKeyword)
      : super(name, ordinal);
}
604
/**
 * Instances of the class `KeywordState` represent a state in a state machine
 * used to scan keywords. Each state has a transition table indexed by the
 * letters 'a' through 'z', and optionally a keyword that is recognized when
 * the machine stops in that state.
 */
class KeywordState {
  /**
   * An empty transition table used by leaf states. (26 entries, one per
   * lowercase ASCII letter.)
   */
  static List<KeywordState> _EMPTY_TABLE = new List<KeywordState>(26);

  /**
   * The initial state in the state machine.
   */
  static KeywordState KEYWORD_STATE = _createKeywordStateTable();

  /**
   * Create the next state in the state machine where we have already
   * recognized the subset of strings in the given array of strings starting at
   * the given offset and having the given length. All of these strings have a
   * common prefix and the next character is at the given start index.
   *
   * @param start the index of the character in the strings used to transition
   *          to a new state
   * @param strings an array containing all of the strings that will be
   *          recognized by the state machine
   * @param offset the offset of the first string in the array that has the
   *          prefix that is assumed to have been recognized by the time we
   *          reach the state being built
   * @param length the number of strings in the array that pass through the
   *          state being built
   * @return the state that was created
   */
  static KeywordState _computeKeywordStateTable(int start, List<String> strings, int offset, int length) {
    List<KeywordState> result = new List<KeywordState>(26);
    assert(length != 0);
    // 'chunk' is the code unit shared by the current run of strings; the
    // strings are sorted, so equal characters at position 'start' are
    // contiguous.
    int chunk = 0x0;
    int chunkStart = -1;
    bool isLeaf = false;
    for (int i = offset; i < offset + length; i++) {
      if (strings[i].length == start) {
        // A string ends exactly here, so this state recognizes a keyword.
        isLeaf = true;
      }
      if (strings[i].length > start) {
        int c = strings[i].codeUnitAt(start);
        if (chunk != c) {
          if (chunkStart != -1) {
            // Close out the previous run by recursively building its state.
            // 0x61 is the code unit for 'a'.
            result[chunk - 0x61] = _computeKeywordStateTable(start + 1, strings, chunkStart, i - chunkStart);
          }
          chunkStart = i;
          chunk = c;
        }
      }
    }
    if (chunkStart != -1) {
      // Build the state for the final run of strings.
      assert(result[chunk - 0x61] == null);
      result[chunk - 0x61] = _computeKeywordStateTable(start + 1, strings, chunkStart, offset + length - chunkStart);
    } else {
      // No string extends past 'start', so this must be a single complete
      // keyword: create a leaf state with no outgoing transitions.
      assert(length == 1);
      return new KeywordState(_EMPTY_TABLE, strings[offset]);
    }
    if (isLeaf) {
      return new KeywordState(result, strings[offset]);
    } else {
      return new KeywordState(result, null);
    }
  }

  /**
   * Create the initial state in the state machine.
   *
   * @return the state that was created
   */
  static KeywordState _createKeywordStateTable() {
    List<Keyword> values = Keyword.values;
    List<String> strings = new List<String>(values.length);
    for (int i = 0; i < values.length; i++) {
      strings[i] = values[i].syntax;
    }
    // The table-building algorithm requires the lexemes to be sorted so that
    // strings sharing a prefix are contiguous.
    strings.sort();
    return _computeKeywordStateTable(0, strings, 0, strings.length);
  }

  /**
   * A table mapping characters to the states to which those characters will
   * transition. (The index into the array is the offset from the character
   * `'a'` to the transitioning character.)
   */
  final List<KeywordState> _table;

  /**
   * The keyword that is recognized by this state, or `null` if this state is
   * not a terminal state.
   */
  Keyword _keyword;

  /**
   * Initialize a newly created state to have the given transitions and to
   * recognize the keyword with the given syntax.
   *
   * @param table a table mapping characters to the states to which those
   *          characters will transition
   * @param syntax the syntax of the keyword that is recognized by the state
   */
  KeywordState(this._table, String syntax) {
    this._keyword = (syntax == null) ? null : Keyword.keywords[syntax];
  }

  /**
   * Return the keyword that was recognized by this state, or `null` if this
   * state does not recognize a keyword.
   *
   * @return the keyword that was matched by reaching this state
   */
  Keyword keyword() => _keyword;

  /**
   * Return the state that follows this state on a transition of the given
   * character, or `null` if there is no valid state reachable from this state
   * with such a transition.
   *
   * @param c the character used to transition from this state to another state
   * @return the state that follows this state on a transition of the given
   *          character
   */
  KeywordState next(int c) => _table[c - 0x61];
}
723
/**
 * Instances of the class `KeywordToken` represent a keyword in the language.
 */
class KeywordToken extends Token {
  /**
   * The keyword being represented by this token.
   */
  final Keyword keyword;

  /**
   * Initialize a newly created token to represent the given keyword.
   *
   * @param keyword the keyword being represented by this token
   * @param offset the offset from the beginning of the file to the first
   *          character in the token
   */
  KeywordToken(this.keyword, int offset) : super(TokenType.KEYWORD, offset);

  /**
   * The lexeme of a keyword token is the keyword's own syntax.
   */
  @override
  String get lexeme => keyword.syntax;

  @override
  Token copy() => new KeywordToken(keyword, offset);

  @override
  Keyword value() => keyword;
}
750
/**
 * Instances of the class `KeywordTokenWithComment` implement a keyword token
 * that is preceded by comments.
 */
class KeywordTokenWithComment extends KeywordToken {
  /**
   * The first comment in the list of comments that precede this token.
   */
  final Token _precedingComment;

  /**
   * Initialize a newly created token to represent the given keyword and to be
   * preceded by the comments reachable from the given comment.
   *
   * @param keyword the keyword being represented by this token
   * @param offset the offset from the beginning of the file to the first
   *          character in the token
   * @param precedingComment the first comment in the list of comments that
   *          precede this token
   */
  KeywordTokenWithComment(Keyword keyword, int offset, this._precedingComment)
      : super(keyword, offset);

  @override
  Token get precedingComments => _precedingComment;

  @override
  Token copy() =>
      new KeywordTokenWithComment(keyword, offset, copyComments(_precedingComment));

  @override
  void applyDelta(int delta) {
    super.applyDelta(delta);
    // Shift the offsets of the preceding comments as well; they are not part
    // of the main token stream.
    for (Token comment = _precedingComment;
        comment != null;
        comment = comment.next) {
      comment.applyDelta(delta);
    }
  }
}
787
/**
 * The class `Scanner` implements a scanner for Dart code.
 *
 * The lexical structure of Dart is ambiguous without knowledge of the context in which a token is
 * being scanned. For example, without context we cannot determine whether source of the form "<<"
 * should be scanned as a single left-shift operator or as two left angle brackets. This scanner
 * does not have any context, so it always resolves such conflicts by scanning the longest possible
 * token.
 */
797 class Scanner {
  /**
   * The source being scanned.
   */
  final Source source;

  /**
   * The reader used to access the characters in the source.
   */
  final CharacterReader _reader;

  /**
   * The error listener that will be informed of any errors that are found
   * during the scan.
   */
  final AnalysisErrorListener _errorListener;

  /**
   * The flag specifying if documentation comments should be parsed.
   */
  bool _preserveComments = true;

  /**
   * The token pointing to the head of the linked list of tokens.
   */
  Token _tokens;

  /**
   * The last token that was scanned.
   */
  Token _tail;

  /**
   * The first token in the list of comment tokens found since the last
   * non-comment token.
   */
  Token _firstComment;

  /**
   * The last token in the list of comment tokens found since the last
   * non-comment token.
   */
  Token _lastComment;

  /**
   * The index of the first character of the current token.
   */
  int _tokenStart = 0;

  /**
   * A list containing the offsets of the first character of each line in the
   * source code.
   */
  List<int> _lineStarts = new List<int>();

  /**
   * A list, treated something like a stack, of tokens representing the
   * beginning of a matched pair. It is used to pair the end tokens with the
   * begin tokens.
   */
  List<BeginToken> _groupingStack = new List<BeginToken>();

  /**
   * The index of the last item in the [_groupingStack], or `-1` if the stack
   * is empty.
   */
  int _stackEnd = -1;

  /**
   * A flag indicating whether any unmatched groups were found during the
   * parse.
   */
  bool _hasUnmatchedGroups = false;
863
  /**
   * Initialize a newly created scanner.
   *
   * @param source the source being scanned
   * @param reader the character reader used to read the characters in the
   *          source
   * @param errorListener the error listener that will be informed of any
   *          errors that are found
   */
  Scanner(this.source, this._reader, this._errorListener) {
    // Create a sentinel EOF token that points to itself; real tokens are
    // appended after it and it serves as the head of the token list.
    _tokens = new Token(TokenType.EOF, -1);
    _tokens.setNext(_tokens);
    _tail = _tokens;
    _tokenStart = -1;
    // The first line always starts at offset 0.
    _lineStarts.add(0);
  }
878
  /**
   * Return an array containing the offsets of the first character of each line
   * in the source code.
   *
   * @return an array containing the offsets of the first character of each
   *          line in the source code
   */
  List<int> get lineStarts => _lineStarts;
885
  /**
   * Return `true` if any unmatched groups were found during the parse.
   *
   * @return `true` if any unmatched groups were found during the parse
   */
  bool get hasUnmatchedGroups => _hasUnmatchedGroups;
892
893 /**
894 * Set whether documentation tokens should be scanned.
895 *
896 * @param preserveComments `true` if documentation tokens should be scanned
897 */
898 void set preserveComments(bool preserveComments) {
899 this._preserveComments = preserveComments;
900 }
901
  /**
   * Record that the source begins on the given line and column at the current
   * offset as given by the reader. The line starts for lines before the given
   * line will not be correct.
   *
   * This method must be invoked at most one time and must be invoked before
   * scanning begins. The values provided must be sensible. The results are
   * undefined if these conditions are violated.
   *
   * @param line the one-based index of the line containing the first character
   *          of the source
   * @param column the one-based index of the column in which the first
   *          character of the source occurs
   */
  void setSourceStart(int line, int column) {
    int offset = _reader.offset;
    // Reject insensible values: both indexes are one-based, and the first
    // character of the given line/column cannot lie past the current offset.
    if (line < 1 || column < 1 || offset < 0 || (line + column - 2) >= offset) {
      return;
    }
    // Pad the list with placeholder starts for the unseen preceding lines
    // (the entry for line 1 was already added by the constructor).
    for (int i = 2; i < line; i++) {
      _lineStarts.add(1);
    }
    _lineStarts.add(offset - column + 1);
  }
923
  /**
   * Scan the source code to produce a list of tokens representing the source.
   *
   * @return the first token in the list of tokens that were produced
   */
  Token tokenize() {
    InstrumentationBuilder instrumentation = Instrumentation.builder2("dart.engine.AbstractScanner.tokenize");
    int tokenCounter = 0;
    try {
      int next = _reader.advance();
      // Drive the scanner until the reader reports end of input (-1).
      while (next != -1) {
        tokenCounter++;
        next = bigSwitch(next);
      }
      _appendEofToken();
      instrumentation.metric2("tokensCount", tokenCounter);
      return firstToken;
    } finally {
      instrumentation.log2(2);
    }
  }
945
  /**
   * Append the given token to the end of the token stream being scanned. This
   * method is intended to be used by subclasses that copy existing tokens and
   * should not normally be used because it will fail to correctly associate
   * any comments with the token being passed in.
   *
   * @param token the token to be appended
   */
  void appendToken(Token token) {
    _tail = _tail.setNext(token);
  }
956
  /**
   * Dispatch on the character [next] to scan one token (or skip whitespace /
   * a line terminator). Returns the first character not consumed by the
   * token just scanned, or `-1` at end of input.
   */
  int bigSwitch(int next) {
    _beginToken();
    if (next == 0xD) { // CR (with optional following LF)
      next = _reader.advance();
      if (next == 0xA) {
        next = _reader.advance();
      }
      recordStartOfLine();
      return next;
    } else if (next == 0xA) { // LF
      next = _reader.advance();
      recordStartOfLine();
      return next;
    } else if (next == 0x9 || next == 0x20) { // tab or space
      return _reader.advance();
    }
    if (next == 0x72) { // 'r': possibly the prefix of a raw string
      int peek = _reader.peek();
      if (peek == 0x22 || peek == 0x27) { // '"' or '\''
        int start = _reader.offset;
        return _tokenizeString(_reader.advance(), start, true);
      }
    }
    if (0x61 <= next && next <= 0x7A) { // 'a'-'z'
      return _tokenizeKeywordOrIdentifier(next, true);
    }
    if ((0x41 <= next && next <= 0x5A) || next == 0x5F || next == 0x24) { // 'A'-'Z', '_', '$'
      return _tokenizeIdentifier(next, _reader.offset, true);
    }
    if (next == 0x3C) { // '<'
      return _tokenizeLessThan(next);
    }
    if (next == 0x3E) { // '>'
      return _tokenizeGreaterThan(next);
    }
    if (next == 0x3D) { // '='
      return _tokenizeEquals(next);
    }
    if (next == 0x21) { // '!'
      return _tokenizeExclamation(next);
    }
    if (next == 0x2B) { // '+'
      return _tokenizePlus(next);
    }
    if (next == 0x2D) { // '-'
      return _tokenizeMinus(next);
    }
    if (next == 0x2A) { // '*'
      return _tokenizeMultiply(next);
    }
    if (next == 0x25) { // '%'
      return _tokenizePercent(next);
    }
    if (next == 0x26) { // '&'
      return _tokenizeAmpersand(next);
    }
    if (next == 0x7C) { // '|'
      return _tokenizeBar(next);
    }
    if (next == 0x5E) { // '^'
      return _tokenizeCaret(next);
    }
    if (next == 0x5B) { // '['
      return _tokenizeOpenSquareBracket(next);
    }
    if (next == 0x7E) { // '~'
      return _tokenizeTilde(next);
    }
    if (next == 0x5C) { // '\'
      _appendTokenOfType(TokenType.BACKSLASH);
      return _reader.advance();
    }
    if (next == 0x23) { // '#'
      return _tokenizeTag(next);
    }
    if (next == 0x28) { // '('
      _appendBeginToken(TokenType.OPEN_PAREN);
      return _reader.advance();
    }
    if (next == 0x29) { // ')'
      _appendEndToken(TokenType.CLOSE_PAREN, TokenType.OPEN_PAREN);
      return _reader.advance();
    }
    if (next == 0x2C) { // ','
      _appendTokenOfType(TokenType.COMMA);
      return _reader.advance();
    }
    if (next == 0x3A) { // ':'
      _appendTokenOfType(TokenType.COLON);
      return _reader.advance();
    }
    if (next == 0x3B) { // ';'
      _appendTokenOfType(TokenType.SEMICOLON);
      return _reader.advance();
    }
    if (next == 0x3F) { // '?'
      _appendTokenOfType(TokenType.QUESTION);
      return _reader.advance();
    }
    if (next == 0x5D) { // ']'
      _appendEndToken(TokenType.CLOSE_SQUARE_BRACKET, TokenType.OPEN_SQUARE_BRACKET);
      return _reader.advance();
    }
    if (next == 0x60) { // '`'
      _appendTokenOfType(TokenType.BACKPING);
      return _reader.advance();
    }
    if (next == 0x7B) { // '{'
      _appendBeginToken(TokenType.OPEN_CURLY_BRACKET);
      return _reader.advance();
    }
    if (next == 0x7D) { // '}'
      _appendEndToken(TokenType.CLOSE_CURLY_BRACKET, TokenType.OPEN_CURLY_BRACKET);
      return _reader.advance();
    }
    if (next == 0x2F) { // '/'
      return _tokenizeSlashOrComment(next);
    }
    if (next == 0x40) { // '@'
      _appendTokenOfType(TokenType.AT);
      return _reader.advance();
    }
    if (next == 0x22 || next == 0x27) { // '"' or '\''
      return _tokenizeString(next, _reader.offset, false);
    }
    if (next == 0x2E) { // '.'
      return _tokenizeDotOrNumber(next);
    }
    if (next == 0x30) { // '0': possibly hexadecimal
      return _tokenizeHexOrNumber(next);
    }
    if (0x31 <= next && next <= 0x39) { // '1'-'9'
      return _tokenizeNumber(next);
    }
    if (next == -1) { // end of input
      return -1;
    }
    _reportError(ScannerErrorCode.ILLEGAL_CHARACTER, [next]);
    return _reader.advance();
  }
1097
1098 /**
1099 * Return the first token in the token stream that was scanned.
1100 *
1101 * @return the first token in the token stream that was scanned
1102 */
1103 Token get firstToken => _tokens.next;
1104
1105 /**
1106 * Return the last token that was scanned.
1107 *
1108 * @return the last token that was scanned
1109 */
1110 Token get tail => _tail;
1111
  /**
   * Record the fact that we are at the beginning of a new line in the source,
   * using the reader's current offset as the line start.
   */
  void recordStartOfLine() {
    _lineStarts.add(_reader.offset);
  }
1118
1119 void _appendBeginToken(TokenType type) {
1120 BeginToken token;
1121 if (_firstComment == null) {
1122 token = new BeginToken(type, _tokenStart);
1123 } else {
1124 token = new BeginTokenWithComment(type, _tokenStart, _firstComment);
1125 _firstComment = null;
1126 _lastComment = null;
1127 }
1128 _tail = _tail.setNext(token);
1129 _groupingStack.add(token);
1130 _stackEnd++;
1131 }
1132
1133 void _appendCommentToken(TokenType type, String value) {
1134 // Ignore comment tokens if client specified that it doesn't need them.
1135 if (!_preserveComments) {
1136 return;
1137 }
1138 // OK, remember comment tokens.
1139 if (_firstComment == null) {
1140 _firstComment = new StringToken(type, value, _tokenStart);
1141 _lastComment = _firstComment;
1142 } else {
1143 _lastComment = _lastComment.setNext(new StringToken(type, value, _tokenSta rt));
1144 }
1145 }
1146
  /**
   * Append a token of the given [type] (carrying any pending comments) and,
   * if the top of the grouping stack is a begin token of [beginType], link
   * the pair and pop the stack. A non-matching stack top is left unchanged.
   */
  void _appendEndToken(TokenType type, TokenType beginType) {
    Token token;
    if (_firstComment == null) {
      token = new Token(type, _tokenStart);
    } else {
      token = new TokenWithComment(type, _tokenStart, _firstComment);
      _firstComment = null;
      _lastComment = null;
    }
    _tail = _tail.setNext(token);
    if (_stackEnd >= 0) {
      BeginToken begin = _groupingStack[_stackEnd];
      if (begin.type == beginType) {
        begin.endToken = token;
        _groupingStack.removeAt(_stackEnd--);
      }
    }
  }
1165
  /**
   * Append the EOF token, attaching any pending comments. Any begin tokens
   * still on the grouping stack at this point mark unmatched groups.
   */
  void _appendEofToken() {
    Token eofToken;
    if (_firstComment == null) {
      eofToken = new Token(TokenType.EOF, _reader.offset + 1);
    } else {
      eofToken = new TokenWithComment(TokenType.EOF, _reader.offset + 1, _firstComment);
      _firstComment = null;
      _lastComment = null;
    }
    // The EOF token points to itself so that there is always infinite look-ahead.
    eofToken.setNext(eofToken);
    _tail = _tail.setNext(eofToken);
    if (_stackEnd >= 0) {
      _hasUnmatchedGroups = true;
    }
  }
1182
1183 void _appendKeywordToken(Keyword keyword) {
1184 if (_firstComment == null) {
1185 _tail = _tail.setNext(new KeywordToken(keyword, _tokenStart));
1186 } else {
1187 _tail = _tail.setNext(new KeywordTokenWithComment(keyword, _tokenStart, _f irstComment));
1188 _firstComment = null;
1189 _lastComment = null;
1190 }
1191 }
1192
1193 void _appendStringToken(TokenType type, String value) {
1194 if (_firstComment == null) {
1195 _tail = _tail.setNext(new StringToken(type, value, _tokenStart));
1196 } else {
1197 _tail = _tail.setNext(new StringTokenWithComment(type, value, _tokenStart, _firstComment));
1198 _firstComment = null;
1199 _lastComment = null;
1200 }
1201 }
1202
1203 void _appendStringTokenWithOffset(TokenType type, String value, int offset) {
1204 if (_firstComment == null) {
1205 _tail = _tail.setNext(new StringToken(type, value, _tokenStart + offset));
1206 } else {
1207 _tail = _tail.setNext(new StringTokenWithComment(type, value, _tokenStart + offset, _firstComment));
1208 _firstComment = null;
1209 _lastComment = null;
1210 }
1211 }
1212
1213 void _appendTokenOfType(TokenType type) {
1214 if (_firstComment == null) {
1215 _tail = _tail.setNext(new Token(type, _tokenStart));
1216 } else {
1217 _tail = _tail.setNext(new TokenWithComment(type, _tokenStart, _firstCommen t));
1218 _firstComment = null;
1219 _lastComment = null;
1220 }
1221 }
1222
1223 void _appendTokenOfTypeWithOffset(TokenType type, int offset) {
1224 if (_firstComment == null) {
1225 _tail = _tail.setNext(new Token(type, offset));
1226 } else {
1227 _tail = _tail.setNext(new TokenWithComment(type, offset, _firstComment));
1228 _firstComment = null;
1229 _lastComment = null;
1230 }
1231 }
1232
  /**
   * Mark the reader's current offset as the start of the next token.
   */
  void _beginToken() {
    _tokenStart = _reader.offset;
  }
1236
  /**
   * Return the beginning token corresponding to a closing brace that was
   * found while scanning inside a string interpolation expression. Tokens
   * that cannot be matched with the closing brace will be dropped from the
   * stack (and recorded as unmatched groups).
   *
   * @return the token to be paired with the closing brace
   */
  BeginToken _findTokenMatchingClosingBraceInInterpolationExpression() {
    while (_stackEnd >= 0) {
      BeginToken begin = _groupingStack[_stackEnd];
      if (begin.type == TokenType.OPEN_CURLY_BRACKET || begin.type == TokenType.STRING_INTERPOLATION_EXPRESSION) {
        return begin;
      }
      // Anything else on the stack (parens, brackets) can never be closed by
      // a '}', so it is unmatched; discard it.
      _hasUnmatchedGroups = true;
      _groupingStack.removeAt(_stackEnd--);
    }
    //
    // We should never get to this point because we wouldn't be inside a string interpolation
    // expression unless we had previously found the start of the expression.
    //
    return null;
  }
1259
  /**
   * Report an error of length 1 at the reader's current offset.
   *
   * @param errorCode the error code indicating the nature of the error
   * @param arguments any arguments needed to complete the error message
   */
  void _reportError(ScannerErrorCode errorCode, List<Object> arguments) {
    _errorListener.onError(new AnalysisError.con2(source, _reader.offset, 1, errorCode, arguments));
  }
1269
1270 int _select(int choice, TokenType yesType, TokenType noType) {
1271 int next = _reader.advance();
1272 if (next == choice) {
1273 _appendTokenOfType(yesType);
1274 return _reader.advance();
1275 } else {
1276 _appendTokenOfType(noType);
1277 return next;
1278 }
1279 }
1280
1281 int _selectWithOffset(int choice, TokenType yesType, TokenType noType, int off set) {
1282 int next = _reader.advance();
1283 if (next == choice) {
1284 _appendTokenOfTypeWithOffset(yesType, offset);
1285 return _reader.advance();
1286 } else {
1287 _appendTokenOfTypeWithOffset(noType, offset);
1288 return next;
1289 }
1290 }
1291
1292 int _tokenizeAmpersand(int next) {
1293 // && &= &
1294 next = _reader.advance();
1295 if (next == 0x26) {
1296 _appendTokenOfType(TokenType.AMPERSAND_AMPERSAND);
1297 return _reader.advance();
1298 } else if (next == 0x3D) {
1299 _appendTokenOfType(TokenType.AMPERSAND_EQ);
1300 return _reader.advance();
1301 } else {
1302 _appendTokenOfType(TokenType.AMPERSAND);
1303 return next;
1304 }
1305 }
1306
1307 int _tokenizeBar(int next) {
1308 // | || |=
1309 next = _reader.advance();
1310 if (next == 0x7C) {
1311 _appendTokenOfType(TokenType.BAR_BAR);
1312 return _reader.advance();
1313 } else if (next == 0x3D) {
1314 _appendTokenOfType(TokenType.BAR_EQ);
1315 return _reader.advance();
1316 } else {
1317 _appendTokenOfType(TokenType.BAR);
1318 return next;
1319 }
1320 }
1321
1322 int _tokenizeCaret(int next) => _select(0x3D, TokenType.CARET_EQ, TokenType.CA RET);
1323
  /**
   * Scan a token that begins with a period: `.`, `..`, `...`, or a
   * fractional number such as `.5`.
   */
  int _tokenizeDotOrNumber(int next) {
    int start = _reader.offset;
    next = _reader.advance();
    if (0x30 <= next && next <= 0x39) { // '0'-'9': a number like '.5'
      return _tokenizeFractionPart(next, start);
    } else if (0x2E == next) { // '.' -> '..' or '...'
      return _select(0x2E, TokenType.PERIOD_PERIOD_PERIOD, TokenType.PERIOD_PERIOD);
    } else {
      _appendTokenOfType(TokenType.PERIOD);
      return next;
    }
  }
1336
1337 int _tokenizeEquals(int next) {
1338 // = == =>
1339 next = _reader.advance();
1340 if (next == 0x3D) {
1341 _appendTokenOfType(TokenType.EQ_EQ);
1342 return _reader.advance();
1343 } else if (next == 0x3E) {
1344 _appendTokenOfType(TokenType.FUNCTION);
1345 return _reader.advance();
1346 }
1347 _appendTokenOfType(TokenType.EQ);
1348 return next;
1349 }
1350
1351 int _tokenizeExclamation(int next) {
1352 // ! !=
1353 next = _reader.advance();
1354 if (next == 0x3D) {
1355 _appendTokenOfType(TokenType.BANG_EQ);
1356 return _reader.advance();
1357 }
1358 _appendTokenOfType(TokenType.BANG);
1359 return next;
1360 }
1361
1362 int _tokenizeExponent(int next) {
1363 if (next == 0x2B || next == 0x2D) {
1364 next = _reader.advance();
1365 }
1366 bool hasDigits = false;
1367 while (true) {
1368 if (0x30 <= next && next <= 0x39) {
1369 hasDigits = true;
1370 } else {
1371 if (!hasDigits) {
1372 _reportError(ScannerErrorCode.MISSING_DIGIT, []);
1373 }
1374 return next;
1375 }
1376 next = _reader.advance();
1377 }
1378 }
1379
  /**
   * Scan the portion of a numeric literal after a decimal point and/or into
   * an exponent. [start] is the offset of the literal's first character;
   * [next] is the first character after the point. If no digit follows the
   * point, the integer part and the period are emitted as separate tokens.
   */
  int _tokenizeFractionPart(int next, int start) {
    bool done = false;
    bool hasDigit = false;
    LOOP: while (!done) {
      if (0x30 <= next && next <= 0x39) { // '0'-'9'
        hasDigit = true;
      } else if (0x65 == next || 0x45 == next) { // 'e' or 'E': exponent part
        hasDigit = true;
        next = _tokenizeExponent(_reader.advance());
        done = true;
        continue LOOP;
      } else {
        done = true;
        continue LOOP;
      }
      next = _reader.advance();
    }
    if (!hasDigit) {
      // e.g. '1.toString()': the digits form an INT and the '.' is its own
      // token. NOTE(review): the -2 end adjustment appears to trim both the
      // '.' and the lookahead character — confirm against
      // CharacterReader.getString.
      _appendStringToken(TokenType.INT, _reader.getString(start, -2));
      if (0x2E == next) { // '.' -> '..' or '...'
        return _selectWithOffset(0x2E, TokenType.PERIOD_PERIOD_PERIOD, TokenType.PERIOD_PERIOD, _reader.offset - 1);
      }
      _appendTokenOfTypeWithOffset(TokenType.PERIOD, _reader.offset - 1);
      return bigSwitch(next);
    }
    _appendStringToken(TokenType.DOUBLE, _reader.getString(start, next < 0 ? 0 : -1));
    return next;
  }
1408
1409 int _tokenizeGreaterThan(int next) {
1410 // > >= >> >>=
1411 next = _reader.advance();
1412 if (0x3D == next) {
1413 _appendTokenOfType(TokenType.GT_EQ);
1414 return _reader.advance();
1415 } else if (0x3E == next) {
1416 next = _reader.advance();
1417 if (0x3D == next) {
1418 _appendTokenOfType(TokenType.GT_GT_EQ);
1419 return _reader.advance();
1420 } else {
1421 _appendTokenOfType(TokenType.GT_GT);
1422 return next;
1423 }
1424 } else {
1425 _appendTokenOfType(TokenType.GT);
1426 return next;
1427 }
1428 }
1429
  /**
   * Scan the digits of a hexadecimal literal; the caller has just consumed
   * the 'x'/'X', so `offset - 1` points at the leading '0'. Reports
   * MISSING_HEX_DIGIT if no digit follows.
   */
  int _tokenizeHex(int next) {
    int start = _reader.offset - 1;
    bool hasDigits = false;
    while (true) {
      next = _reader.advance();
      if ((0x30 <= next && next <= 0x39) || (0x41 <= next && next <= 0x46) || (0x61 <= next && next <= 0x66)) { // 0-9, A-F, a-f
        hasDigits = true;
      } else {
        if (!hasDigits) {
          _reportError(ScannerErrorCode.MISSING_HEX_DIGIT, []);
        }
        _appendStringToken(TokenType.HEXADECIMAL, _reader.getString(start, next < 0 ? 0 : -1));
        return next;
      }
    }
  }
1446
1447 int _tokenizeHexOrNumber(int next) {
1448 int x = _reader.peek();
1449 if (x == 0x78 || x == 0x58) {
1450 _reader.advance();
1451 return _tokenizeHex(x);
1452 }
1453 return _tokenizeNumber(next);
1454 }
1455
  /**
   * Consume the remainder of an identifier and append an IDENTIFIER token.
   * [start] is the offset of its first character; [allowDollar] controls
   * whether `$` may appear (it is disallowed when scanning the identifier of
   * a `$identifier` interpolation).
   */
  int _tokenizeIdentifier(int next, int start, bool allowDollar) {
    while ((0x61 <= next && next <= 0x7A) || (0x41 <= next && next <= 0x5A) || (0x30 <= next && next <= 0x39) || next == 0x5F || (next == 0x24 && allowDollar)) {
      next = _reader.advance();
    }
    _appendStringToken(TokenType.IDENTIFIER, _reader.getString(start, next < 0 ? 0 : -1));
    return next;
  }
1463
  /**
   * Scan the tokens of a `${...}` interpolation. The `$` has been consumed
   * and [next] is the `{`. A STRING_INTERPOLATION_EXPRESSION begin token is
   * pushed so the matching `}` can be recognized across nested braces.
   */
  int _tokenizeInterpolatedExpression(int next, int start) {
    _appendBeginToken(TokenType.STRING_INTERPOLATION_EXPRESSION);
    next = _reader.advance();
    while (next != -1) {
      if (next == 0x7D) { // '}'
        BeginToken begin = _findTokenMatchingClosingBraceInInterpolationExpression();
        if (begin == null) {
          // No matching opener at all: emit the brace and resume the string.
          _beginToken();
          _appendTokenOfType(TokenType.CLOSE_CURLY_BRACKET);
          next = _reader.advance();
          _beginToken();
          return next;
        } else if (begin.type == TokenType.OPEN_CURLY_BRACKET) {
          // Closes a block brace nested inside the interpolation.
          _beginToken();
          _appendEndToken(TokenType.CLOSE_CURLY_BRACKET, TokenType.OPEN_CURLY_BRACKET);
          next = _reader.advance();
          _beginToken();
        } else if (begin.type == TokenType.STRING_INTERPOLATION_EXPRESSION) {
          // Closes the interpolation itself; return to string scanning.
          _beginToken();
          _appendEndToken(TokenType.CLOSE_CURLY_BRACKET, TokenType.STRING_INTERPOLATION_EXPRESSION);
          next = _reader.advance();
          _beginToken();
          return next;
        }
      } else {
        next = bigSwitch(next);
      }
    }
    return next;
  }
1494
  /**
   * Scan a `$identifier` interpolation. The `$` has already been consumed and
   * is emitted as a STRING_INTERPOLATION_IDENTIFIER token; a following
   * letter or underscore starts the interpolated identifier.
   */
  int _tokenizeInterpolatedIdentifier(int next, int start) {
    _appendStringTokenWithOffset(TokenType.STRING_INTERPOLATION_IDENTIFIER, "\$", 0);
    if ((0x41 <= next && next <= 0x5A) || (0x61 <= next && next <= 0x7A) || next == 0x5F) { // letter or '_'
      _beginToken();
      next = _tokenizeKeywordOrIdentifier(next, false);
    }
    _beginToken();
    return next;
  }
1504
  /**
   * Scan a word that starts with a lower-case letter, running it through the
   * keyword state machine to decide between a keyword token and an
   * identifier.
   */
  int _tokenizeKeywordOrIdentifier(int next, bool allowDollar) {
    KeywordState state = KeywordState.KEYWORD_STATE;
    int start = _reader.offset;
    while (state != null && 0x61 <= next && next <= 0x7A) { // 'a'-'z'
      state = state.next(next);
      next = _reader.advance();
    }
    if (state == null || state.keyword() == null) {
      // The prefix read so far is not (the start of) any keyword.
      return _tokenizeIdentifier(next, start, allowDollar);
    }
    if ((0x41 <= next && next <= 0x5A) || (0x30 <= next && next <= 0x39) || next == 0x5F || next == 0x24) {
      // A keyword followed by an identifier character (e.g. 'form') is an
      // identifier.
      return _tokenizeIdentifier(next, start, allowDollar);
    } else if (next < 128) {
      _appendKeywordToken(state.keyword());
      return next;
    } else {
      // Non-ASCII continuation: treat the whole word as an identifier.
      return _tokenizeIdentifier(next, start, allowDollar);
    }
  }
1524
1525 int _tokenizeLessThan(int next) {
1526 // < <= << <<=
1527 next = _reader.advance();
1528 if (0x3D == next) {
1529 _appendTokenOfType(TokenType.LT_EQ);
1530 return _reader.advance();
1531 } else if (0x3C == next) {
1532 return _select(0x3D, TokenType.LT_LT_EQ, TokenType.LT_LT);
1533 } else {
1534 _appendTokenOfType(TokenType.LT);
1535 return next;
1536 }
1537 }
1538
1539 int _tokenizeMinus(int next) {
1540 // - -- -=
1541 next = _reader.advance();
1542 if (next == 0x2D) {
1543 _appendTokenOfType(TokenType.MINUS_MINUS);
1544 return _reader.advance();
1545 } else if (next == 0x3D) {
1546 _appendTokenOfType(TokenType.MINUS_EQ);
1547 return _reader.advance();
1548 } else {
1549 _appendTokenOfType(TokenType.MINUS);
1550 return next;
1551 }
1552 }
1553
  /**
   * Scan a `/*...*/` comment; nested multi-line comments are supported via a
   * nesting counter. Line starts are recorded for newlines inside the
   * comment.
   */
  int _tokenizeMultiLineComment(int next) {
    int nesting = 1;
    next = _reader.advance();
    while (true) {
      if (-1 == next) {
        _reportError(ScannerErrorCode.UNTERMINATED_MULTI_LINE_COMMENT, []);
        _appendCommentToken(TokenType.MULTI_LINE_COMMENT, _reader.getString(_tokenStart, 0));
        return next;
      } else if (0x2A == next) { // '*'
        next = _reader.advance();
        if (0x2F == next) { // '/': one nesting level closed
          --nesting;
          if (0 == nesting) {
            _appendCommentToken(TokenType.MULTI_LINE_COMMENT, _reader.getString(_tokenStart, 0));
            return _reader.advance();
          } else {
            next = _reader.advance();
          }
        }
      } else if (0x2F == next) { // '/'
        next = _reader.advance();
        if (0x2A == next) { // '*': nested comment opened
          next = _reader.advance();
          ++nesting;
        }
      } else if (next == 0xD) { // CR (with optional following LF)
        next = _reader.advance();
        if (next == 0xA) {
          next = _reader.advance();
        }
        recordStartOfLine();
      } else if (next == 0xA) { // LF
        recordStartOfLine();
        next = _reader.advance();
      } else {
        next = _reader.advance();
      }
    }
  }
1593
  /**
   * Scan a triple-quoted raw string (`r'''...'''` / `r"""..."""`). No escapes
   * or interpolation are recognized; only three [quoteChar]s in a row end the
   * string. Reports UNTERMINATED_STRING_LITERAL at end of input.
   */
  int _tokenizeMultiLineRawString(int quoteChar, int start) {
    int next = _reader.advance();
    outer: while (next != -1) {
      while (next != quoteChar) {
        next = _reader.advance();
        if (next == -1) {
          break outer;
        } else if (next == 0xD) { // CR (with optional following LF)
          next = _reader.advance();
          if (next == 0xA) {
            next = _reader.advance();
          }
          recordStartOfLine();
        } else if (next == 0xA) { // LF
          recordStartOfLine();
          next = _reader.advance();
        }
      }
      next = _reader.advance();
      if (next == quoteChar) {
        next = _reader.advance();
        if (next == quoteChar) {
          // Three quotes in a row close the string.
          _appendStringToken(TokenType.STRING, _reader.getString(start, 0));
          return _reader.advance();
        }
      }
    }
    _reportError(ScannerErrorCode.UNTERMINATED_STRING_LITERAL, []);
    _appendStringToken(TokenType.STRING, _reader.getString(start, 0));
    return _reader.advance();
  }
1625
  /**
   * Scan a multi-line (triple-quoted) string. [quoteChar] is the quote
   * character (`"` or `'`), [start] is the offset of the first quote, and
   * [raw] delegates to [_tokenizeMultiLineRawString].
   */
  int _tokenizeMultiLineString(int quoteChar, int start, bool raw) {
    if (raw) {
      return _tokenizeMultiLineRawString(quoteChar, start);
    }
    int next = _reader.advance();
    while (next != -1) {
      if (next == 0x24) { // '$': string interpolation splits the string
        _appendStringToken(TokenType.STRING, _reader.getString(start, -1));
        next = _tokenizeStringInterpolation(start);
        _beginToken();
        start = _reader.offset;
        continue;
      }
      if (next == quoteChar) {
        next = _reader.advance();
        if (next == quoteChar) {
          next = _reader.advance();
          if (next == quoteChar) {
            // Three quotes in a row close the string.
            _appendStringToken(TokenType.STRING, _reader.getString(start, 0));
            return _reader.advance();
          }
        }
        continue;
      }
      if (next == 0x5C) { // '\': skip over the escaped character
        next = _reader.advance();
        if (next == -1) {
          break;
        }
        if (next == 0xD) { // CR (with optional following LF)
          next = _reader.advance();
          if (next == 0xA) {
            next = _reader.advance();
          }
          recordStartOfLine();
        } else if (next == 0xA) { // LF
          recordStartOfLine();
          next = _reader.advance();
        } else {
          next = _reader.advance();
        }
      } else if (next == 0xD) { // CR (with optional following LF)
        next = _reader.advance();
        if (next == 0xA) {
          next = _reader.advance();
        }
        recordStartOfLine();
      } else if (next == 0xA) { // LF
        recordStartOfLine();
        next = _reader.advance();
      } else {
        next = _reader.advance();
      }
    }
    // Ran off the end of the source without seeing the closing quotes.
    _reportError(ScannerErrorCode.UNTERMINATED_STRING_LITERAL, []);
    if (start == _reader.offset) {
      _appendStringTokenWithOffset(TokenType.STRING, "", 1);
    } else {
      _appendStringToken(TokenType.STRING, _reader.getString(start, 0));
    }
    return _reader.advance();
  }
1688
1689 int _tokenizeMultiply(int next) => _select(0x3D, TokenType.STAR_EQ, TokenType. STAR);
1690
  /**
   * Scan a decimal integer, continuing into a fraction or exponent part when
   * a '.', 'e', or 'E' follows the digits.
   */
  int _tokenizeNumber(int next) {
    int start = _reader.offset;
    while (true) {
      next = _reader.advance();
      if (0x30 <= next && next <= 0x39) { // '0'-'9'
        continue;
      } else if (next == 0x2E) { // '.'
        return _tokenizeFractionPart(_reader.advance(), start);
      } else if (next == 0x65 || next == 0x45) { // 'e' or 'E'
        return _tokenizeFractionPart(next, start);
      } else {
        _appendStringToken(TokenType.INT, _reader.getString(start, next < 0 ? 0 : -1));
        return next;
      }
    }
  }
1707
1708 int _tokenizeOpenSquareBracket(int next) {
1709 // [ [] []=
1710 next = _reader.advance();
1711 if (next == 0x5D) {
1712 return _select(0x3D, TokenType.INDEX_EQ, TokenType.INDEX);
1713 } else {
1714 _appendBeginToken(TokenType.OPEN_SQUARE_BRACKET);
1715 return next;
1716 }
1717 }
1718
1719 int _tokenizePercent(int next) => _select(0x3D, TokenType.PERCENT_EQ, TokenTyp e.PERCENT);
1720
1721 int _tokenizePlus(int next) {
1722 // + ++ +=
1723 next = _reader.advance();
1724 if (0x2B == next) {
1725 _appendTokenOfType(TokenType.PLUS_PLUS);
1726 return _reader.advance();
1727 } else if (0x3D == next) {
1728 _appendTokenOfType(TokenType.PLUS_EQ);
1729 return _reader.advance();
1730 } else {
1731 _appendTokenOfType(TokenType.PLUS);
1732 return next;
1733 }
1734 }
1735
  /**
   * Scan a `//` comment through to (but not including) the line terminator
   * or end of input.
   */
  int _tokenizeSingleLineComment(int next) {
    while (true) {
      next = _reader.advance();
      if (-1 == next) {
        _appendCommentToken(TokenType.SINGLE_LINE_COMMENT, _reader.getString(_tokenStart, 0));
        return next;
      } else if (0xA == next || 0xD == next) { // LF or CR ends the comment
        _appendCommentToken(TokenType.SINGLE_LINE_COMMENT, _reader.getString(_tokenStart, -1));
        return next;
      }
    }
  }
1748
  /**
   * Scan a single-line raw string (`r'...'` / `r"..."`). No escapes or
   * interpolation are recognized; a line terminator or end of input before
   * the closing [quoteChar] is reported as unterminated.
   */
  int _tokenizeSingleLineRawString(int next, int quoteChar, int start) {
    next = _reader.advance();
    while (next != -1) {
      if (next == quoteChar) {
        _appendStringToken(TokenType.STRING, _reader.getString(start, 0));
        return _reader.advance();
      } else if (next == 0xD || next == 0xA) { // raw strings cannot span lines
        _reportError(ScannerErrorCode.UNTERMINATED_STRING_LITERAL, []);
        _appendStringToken(TokenType.STRING, _reader.getString(start, -1));
        return _reader.advance();
      }
      next = _reader.advance();
    }
    _reportError(ScannerErrorCode.UNTERMINATED_STRING_LITERAL, []);
    _appendStringToken(TokenType.STRING, _reader.getString(start, 0));
    return _reader.advance();
  }
1766
  /**
   * Scan a single-line non-raw string. [next] is the first character after
   * the opening [quoteChar]; [start] is the offset of the quote. Handles
   * escapes and interpolation; a line terminator or end of input before the
   * closing quote is reported as unterminated.
   */
  int _tokenizeSingleLineString(int next, int quoteChar, int start) {
    while (next != quoteChar) {
      if (next == 0x5C) { // '\': skip over the escaped character
        next = _reader.advance();
      } else if (next == 0x24) { // '$': string interpolation splits the string
        _appendStringToken(TokenType.STRING, _reader.getString(start, -1));
        next = _tokenizeStringInterpolation(start);
        _beginToken();
        start = _reader.offset;
        continue;
      }
      if (next <= 0xD && (next == 0xA || next == 0xD || next == -1)) {
        // Line terminator or end of input before the closing quote.
        _reportError(ScannerErrorCode.UNTERMINATED_STRING_LITERAL, []);
        if (start == _reader.offset) {
          _appendStringTokenWithOffset(TokenType.STRING, "", 1);
        } else if (next == -1) {
          _appendStringToken(TokenType.STRING, _reader.getString(start, 0));
        } else {
          _appendStringToken(TokenType.STRING, _reader.getString(start, -1));
        }
        return _reader.advance();
      }
      next = _reader.advance();
    }
    _appendStringToken(TokenType.STRING, _reader.getString(start, 0));
    return _reader.advance();
  }
1794
1795 int _tokenizeSlashOrComment(int next) {
1796 next = _reader.advance();
1797 if (0x2A == next) {
1798 return _tokenizeMultiLineComment(next);
1799 } else if (0x2F == next) {
1800 return _tokenizeSingleLineComment(next);
1801 } else if (0x3D == next) {
1802 _appendTokenOfType(TokenType.SLASH_EQ);
1803 return _reader.advance();
1804 } else {
1805 _appendTokenOfType(TokenType.SLASH);
1806 return next;
1807 }
1808 }
1809
  /**
   * Scan a string literal. [next] is the opening quote character, [start] is
   * its offset, and [raw] indicates a preceding `r`.
   */
  int _tokenizeString(int next, int start, bool raw) {
    int quoteChar = next;
    next = _reader.advance();
    if (quoteChar == next) {
      next = _reader.advance();
      if (quoteChar == next) {
        // Three quotes: a multiline string.
        return _tokenizeMultiLineString(quoteChar, start, raw);
      } else {
        // Two quotes: an empty string.
        _appendStringToken(TokenType.STRING, _reader.getString(start, -1));
        return next;
      }
    }
    if (raw) {
      return _tokenizeSingleLineRawString(next, quoteChar, start);
    } else {
      return _tokenizeSingleLineString(next, quoteChar, start);
    }
  }
1830
1831 int _tokenizeStringInterpolation(int start) {
1832 _beginToken();
1833 int next = _reader.advance();
1834 if (next == 0x7B) {
1835 return _tokenizeInterpolatedExpression(next, start);
1836 } else {
1837 return _tokenizeInterpolatedIdentifier(next, start);
1838 }
1839 }
1840
  /**
   * Scan a `#` token: either a hash-bang script tag (`#!...`, only at offset
   * 0, consumed up to the line terminator) or a plain HASH token.
   */
  int _tokenizeTag(int next) {
    // # or #!.*[\n\r]
    if (_reader.offset == 0) {
      if (_reader.peek() == 0x21) { // '!'
        do {
          next = _reader.advance();
        } while (next != 0xA && next != 0xD && next > 0);
        _appendStringToken(TokenType.SCRIPT_TAG, _reader.getString(_tokenStart, 0));
        return next;
      }
    }
    _appendTokenOfType(TokenType.HASH);
    return _reader.advance();
  }
1855
1856 int _tokenizeTilde(int next) {
1857 // ~ ~/ ~/=
1858 next = _reader.advance();
1859 if (next == 0x2F) {
1860 return _select(0x3D, TokenType.TILDE_SLASH_EQ, TokenType.TILDE_SLASH);
1861 } else {
1862 _appendTokenOfType(TokenType.TILDE);
1863 return next;
1864 }
1865 }
1866 }
1867
/**
 * The enumeration `ScannerErrorCode` defines the error codes used for errors
 * detected by the scanner.
 */
class ScannerErrorCode extends Enum<ScannerErrorCode> implements ErrorCode {
  static const ScannerErrorCode ILLEGAL_CHARACTER = const ScannerErrorCode.con1('ILLEGAL_CHARACTER', 0, "Illegal character {0}");

  static const ScannerErrorCode MISSING_DIGIT = const ScannerErrorCode.con1('MISSING_DIGIT', 1, "Decimal digit expected");

  // Fix: the user-facing message previously misspelled "Hexadecimal" as
  // "Hexidecimal".
  static const ScannerErrorCode MISSING_HEX_DIGIT = const ScannerErrorCode.con1('MISSING_HEX_DIGIT', 2, "Hexadecimal digit expected");

  static const ScannerErrorCode MISSING_QUOTE = const ScannerErrorCode.con1('MISSING_QUOTE', 3, "Expected quote (' or \")");

  static const ScannerErrorCode UNTERMINATED_MULTI_LINE_COMMENT = const ScannerErrorCode.con1('UNTERMINATED_MULTI_LINE_COMMENT', 4, "Unterminated multi-line comment");

  static const ScannerErrorCode UNTERMINATED_STRING_LITERAL = const ScannerErrorCode.con1('UNTERMINATED_STRING_LITERAL', 5, "Unterminated string literal");

  static const List<ScannerErrorCode> values = const [
      ILLEGAL_CHARACTER,
      MISSING_DIGIT,
      MISSING_HEX_DIGIT,
      MISSING_QUOTE,
      UNTERMINATED_MULTI_LINE_COMMENT,
      UNTERMINATED_STRING_LITERAL];

  /**
   * The template used to create the message to be displayed for this error.
   */
  final String message;

  /**
   * The template used to create the correction to be displayed for this
   * error, or `null` if there is no correction information for this error.
   */
  final String correction;

  /**
   * Initialize a newly created error code to have the given message and no
   * correction.
   *
   * @param message the message template used to create the message to be
   *          displayed for this error
   */
  const ScannerErrorCode.con1(String name, int ordinal, String message) : this.con2(name, ordinal, message, null);

  /**
   * Initialize a newly created error code to have the given message and
   * correction.
   *
   * @param message the template used to create the message to be displayed
   *          for the error
   * @param correction the template used to create the correction to be
   *          displayed for the error
   */
  const ScannerErrorCode.con2(String name, int ordinal, this.message, this.correction) : super(name, ordinal);

  @override
  ErrorSeverity get errorSeverity => ErrorSeverity.ERROR;

  @override
  ErrorType get type => ErrorType.SYNTACTIC_ERROR;

  @override
  String get uniqueName => "${runtimeType.toString()}.${name}";
}
1928
/**
 * Instances of the class `StringToken` represent a token whose value is
 * independent of its type.
 */
class StringToken extends Token {
  /**
   * The lexeme represented by this token, interned so that equal lexemes
   * share a single string instance.
   */
  String _value;

  /**
   * Initialize a newly created token to represent a token of the given [type]
   * with the given [value], starting at the given [offset].
   */
  StringToken(TokenType type, String value, int offset) : super(type, offset) {
    _value = StringUtilities.intern(value);
  }

  @override
  String get lexeme => _value;

  @override
  Token copy() => new StringToken(type, _value, offset);

  @override
  String value() => _value;
}
1959
/**
 * Instances of the class `StringTokenWithComment` represent a string token
 * that is preceded by comments.
 */
class StringTokenWithComment extends StringToken {
  /**
   * The head of the linked list of comment tokens that precede this token.
   */
  final Token _precedingComment;

  /**
   * Initialize a newly created token to have the given type, value and offset
   * and to be preceded by the comments reachable from the given comment.
   *
   * @param type the type of the token
   * @param value the lexeme represented by this token
   * @param offset the offset from the beginning of the file to the first character in the token
   * @param precedingComment the first comment in the list of comments that precede this token
   */
  StringTokenWithComment(TokenType type, String value, int offset, this._precedingComment) : super(type, value, offset);

  @override
  Token get precedingComments => _precedingComment;

  @override
  void applyDelta(int delta) {
    super.applyDelta(delta);
    // Shift every preceding comment token by the same amount.
    for (Token comment = _precedingComment; comment != null; comment = comment.next) {
      comment.applyDelta(delta);
    }
  }

  @override
  Token copy() => new StringTokenWithComment(type, lexeme, offset, copyComments(_precedingComment));
}
1996
/**
 * Instances of the class `SubSequenceReader` implement a [CharacterReader]
 * that reads characters from a character sequence, but adds a delta when
 * reporting the current character offset so that the character sequence can be
 * a subsequence of a larger sequence.
 */
class SubSequenceReader extends CharSequenceReader {
  /**
   * The offset from the beginning of the file to the beginning of the source
   * being scanned.
   */
  final int _offsetDelta;

  /**
   * Initialize a newly created reader to read the characters in the given
   * sequence.
   *
   * @param sequence the sequence from which characters will be read
   * @param offsetDelta the offset from the beginning of the file to the
   *          beginning of the source being scanned
   */
  SubSequenceReader(String sequence, this._offsetDelta) : super(sequence);

  /**
   * Report offsets relative to the start of the file rather than the start of
   * the subsequence.
   */
  @override
  int get offset => super.offset + _offsetDelta;

  @override
  void set offset(int offset) {
    // Translate the file-relative offset back into a subsequence-relative one.
    super.offset = offset - _offsetDelta;
  }

  @override
  String getString(int start, int endDelta) => super.getString(start - _offsetDelta, endDelta);
}
2028
/**
 * A synthetic `StringToken` represents a token whose value is independent of
 * its type and that was introduced by the parser (e.g. during error recovery)
 * rather than scanned from the input.
 */
class SyntheticStringToken extends StringToken {
  /**
   * Initialize a newly created synthetic token of the given type with the
   * given value.
   *
   * @param type the type of the token
   * @param value the lexeme represented by this token
   * @param offset the offset from the beginning of the file to the first character in the token
   */
  SyntheticStringToken(TokenType type, String value, int offset) : super(type, value, offset);

  /**
   * Unlike a plain [StringToken], a synthetic token is always considered
   * synthetic, even when its lexeme is non-empty.
   */
  @override
  bool get isSynthetic => true;
}
2045
/**
 * Instances of the class `Token` represent a token that was scanned from the input. Each
 * token knows which token follows it, acting as the head of a linked list of tokens.
 */
class Token {
  /**
   * The type of the token.
   */
  final TokenType type;

  /**
   * The offset from the beginning of the file to the first character in the token.
   */
  int offset = 0;

  /**
   * The previous token in the token stream.
   */
  Token previous;

  /**
   * The next token in the token stream.
   */
  Token _next;

  /**
   * Initialize a newly created token to have the given type and offset.
   *
   * @param type the type of the token
   * @param offset the offset from the beginning of the file to the first character in the token
   */
  Token(this.type, this.offset);

  /**
   * Return a newly created token that is a copy of this token but that is not a part of any token
   * stream.
   *
   * @return a newly created token that is a copy of this token
   */
  Token copy() => new Token(type, offset);

  /**
   * Return the offset from the beginning of the file to the character after the last character of
   * the token.
   *
   * @return the offset from the beginning of the file to the first character after the last
   *         character of the token
   */
  int get end => offset + length;

  /**
   * Return the number of characters in the token's source range.
   *
   * @return the number of characters in the token's source range
   */
  int get length => lexeme.length;

  /**
   * Return the lexeme that represents this token.
   *
   * @return the lexeme that represents this token
   */
  String get lexeme => type.lexeme;

  /**
   * Return the next token in the token stream.
   *
   * @return the next token in the token stream
   */
  Token get next => _next;

  /**
   * Return the first comment in the list of comments that precede this token, or `null` if
   * there are no comments preceding this token. Additional comments can be reached by following the
   * token stream using [getNext] until `null` is returned.
   *
   * @return the first comment in the list of comments that precede this token
   */
  Token get precedingComments => null;

  /**
   * Return `true` if this token represents an operator.
   *
   * @return `true` if this token represents an operator
   */
  bool get isOperator => type.isOperator;

  /**
   * Return `true` if this token is a synthetic token. A synthetic token is a token that was
   * introduced by the parser in order to recover from an error in the code.
   *
   * @return `true` if this token is a synthetic token
   */
  bool get isSynthetic => length == 0;

  /**
   * Return `true` if this token represents an operator that can be defined by users.
   *
   * @return `true` if this token represents an operator that can be defined by users
   */
  bool get isUserDefinableOperator => type.isUserDefinableOperator;

  /**
   * Return `true` if this token has any one of the given types.
   *
   * @param types the types of token that are being tested for
   * @return `true` if this token has any of the given types
   */
  bool matchesAny(List<TokenType> types) {
    for (TokenType type in types) {
      if (this.type == type) {
        return true;
      }
    }
    return false;
  }

  /**
   * Set the next token in the token stream to the given token. This has the side-effect of setting
   * this token to be the previous token for the given token.
   *
   * @param token the next token in the token stream
   * @return the token that was passed in
   */
  Token setNext(Token token) {
    _next = token;
    token.previous = this;
    return token;
  }

  /**
   * Set the next token in the token stream to the given token without changing which token is the
   * previous token for the given token.
   *
   * @param token the next token in the token stream
   * @return the token that was passed in
   */
  Token setNextWithoutSettingPrevious(Token token) {
    _next = token;
    return token;
  }

  @override
  String toString() => lexeme;

  /**
   * Return the value of this token. For keyword tokens, this is the keyword associated with the
   * token, for other tokens it is the lexeme associated with the token.
   *
   * @return the value of this token
   */
  Object value() => type.lexeme;

  /**
   * Apply (add) the given delta to this token's offset.
   *
   * @param delta the amount by which the offset is to be adjusted
   */
  void applyDelta(int delta) {
    offset += delta;
  }

  /**
   * Copy a linked list of comment tokens identical to the given comment tokens.
   *
   * @param token the first token in the list, or `null` if there are no tokens to be copied
   * @return the first token in the newly created list
   */
  Token copyComments(Token token) {
    if (token == null) {
      return null;
    }
    Token head = token.copy();
    Token tail = head;
    token = token.next;
    while (token != null) {
      tail = tail.setNext(token.copy());
      token = token.next;
    }
    return head;
  }
}
2230
/**
 * The enumeration `TokenClass` represents classes (or groups) of tokens with a similar use.
 */
class TokenClass extends Enum<TokenClass> {
  /**
   * A value used to indicate that the token type is not part of any specific class of token.
   */
  static const TokenClass NO_CLASS = const TokenClass.con1('NO_CLASS', 0);

  /**
   * A value used to indicate that the token type is an additive operator.
   */
  static const TokenClass ADDITIVE_OPERATOR = const TokenClass.con2('ADDITIVE_OPERATOR', 1, 12);

  /**
   * A value used to indicate that the token type is an assignment operator.
   */
  static const TokenClass ASSIGNMENT_OPERATOR = const TokenClass.con2('ASSIGNMENT_OPERATOR', 2, 1);

  /**
   * A value used to indicate that the token type is a bitwise-and operator.
   */
  static const TokenClass BITWISE_AND_OPERATOR = const TokenClass.con2('BITWISE_AND_OPERATOR', 3, 10);

  /**
   * A value used to indicate that the token type is a bitwise-or operator.
   */
  static const TokenClass BITWISE_OR_OPERATOR = const TokenClass.con2('BITWISE_OR_OPERATOR', 4, 8);

  /**
   * A value used to indicate that the token type is a bitwise-xor operator.
   */
  static const TokenClass BITWISE_XOR_OPERATOR = const TokenClass.con2('BITWISE_XOR_OPERATOR', 5, 9);

  /**
   * A value used to indicate that the token type is a cascade operator.
   */
  static const TokenClass CASCADE_OPERATOR = const TokenClass.con2('CASCADE_OPERATOR', 6, 2);

  /**
   * A value used to indicate that the token type is a conditional operator.
   */
  static const TokenClass CONDITIONAL_OPERATOR = const TokenClass.con2('CONDITIONAL_OPERATOR', 7, 3);

  /**
   * A value used to indicate that the token type is an equality operator.
   */
  static const TokenClass EQUALITY_OPERATOR = const TokenClass.con2('EQUALITY_OPERATOR', 8, 6);

  /**
   * A value used to indicate that the token type is a logical-and operator.
   */
  static const TokenClass LOGICAL_AND_OPERATOR = const TokenClass.con2('LOGICAL_AND_OPERATOR', 9, 5);

  /**
   * A value used to indicate that the token type is a logical-or operator.
   */
  static const TokenClass LOGICAL_OR_OPERATOR = const TokenClass.con2('LOGICAL_OR_OPERATOR', 10, 4);

  /**
   * A value used to indicate that the token type is a multiplicative operator.
   */
  static const TokenClass MULTIPLICATIVE_OPERATOR = const TokenClass.con2('MULTIPLICATIVE_OPERATOR', 11, 13);

  /**
   * A value used to indicate that the token type is a relational operator.
   */
  static const TokenClass RELATIONAL_OPERATOR = const TokenClass.con2('RELATIONAL_OPERATOR', 12, 7);

  /**
   * A value used to indicate that the token type is a shift operator.
   */
  static const TokenClass SHIFT_OPERATOR = const TokenClass.con2('SHIFT_OPERATOR', 13, 11);

  /**
   * A value used to indicate that the token type is a unary postfix operator.
   */
  static const TokenClass UNARY_POSTFIX_OPERATOR = const TokenClass.con2('UNARY_POSTFIX_OPERATOR', 14, 15);

  /**
   * A value used to indicate that the token type is a unary prefix operator.
   */
  static const TokenClass UNARY_PREFIX_OPERATOR = const TokenClass.con2('UNARY_PREFIX_OPERATOR', 15, 14);

  /**
   * A list of all of the token classes, in ordinal order.
   */
  static const List<TokenClass> values = const [
      NO_CLASS,
      ADDITIVE_OPERATOR,
      ASSIGNMENT_OPERATOR,
      BITWISE_AND_OPERATOR,
      BITWISE_OR_OPERATOR,
      BITWISE_XOR_OPERATOR,
      CASCADE_OPERATOR,
      CONDITIONAL_OPERATOR,
      EQUALITY_OPERATOR,
      LOGICAL_AND_OPERATOR,
      LOGICAL_OR_OPERATOR,
      MULTIPLICATIVE_OPERATOR,
      RELATIONAL_OPERATOR,
      SHIFT_OPERATOR,
      UNARY_POSTFIX_OPERATOR,
      UNARY_PREFIX_OPERATOR];

  /**
   * The precedence of tokens of this class, or `0` if such tokens do not represent an
   * operator. Higher numbers bind more tightly.
   */
  final int precedence;

  /**
   * Initialize a token class with no associated operator precedence.
   */
  const TokenClass.con1(String name, int ordinal) : this.con2(name, ordinal, 0);

  /**
   * Initialize a token class with the given operator precedence.
   */
  const TokenClass.con2(String name, int ordinal, this.precedence) : super(name, ordinal);
}
2343
/**
 * The enumeration `TokenType` defines the types of tokens that can be returned by the
 * scanner.
 */
class TokenType extends Enum<TokenType> {
  /**
   * The type of the token that marks the end of the input.
   */
  static const TokenType EOF = const TokenType_EOF('EOF', 0, TokenClass.NO_CLASS, "");

  // Token types whose lexeme depends on the scanned input (literals,
  // identifiers, keywords, comments); their `lexeme` field is `null`.

  static const TokenType DOUBLE = const TokenType.con1('DOUBLE', 1);

  static const TokenType HEXADECIMAL = const TokenType.con1('HEXADECIMAL', 2);

  static const TokenType IDENTIFIER = const TokenType.con1('IDENTIFIER', 3);

  static const TokenType INT = const TokenType.con1('INT', 4);

  static const TokenType KEYWORD = const TokenType.con1('KEYWORD', 5);

  static const TokenType MULTI_LINE_COMMENT = const TokenType.con1('MULTI_LINE_COMMENT', 6);

  static const TokenType SCRIPT_TAG = const TokenType.con1('SCRIPT_TAG', 7);

  static const TokenType SINGLE_LINE_COMMENT = const TokenType.con1('SINGLE_LINE_COMMENT', 8);

  static const TokenType STRING = const TokenType.con1('STRING', 9);

  // Token types with a fixed lexeme (operators and punctuation).

  static const TokenType AMPERSAND = const TokenType.con2('AMPERSAND', 10, TokenClass.BITWISE_AND_OPERATOR, "&");

  static const TokenType AMPERSAND_AMPERSAND = const TokenType.con2('AMPERSAND_AMPERSAND', 11, TokenClass.LOGICAL_AND_OPERATOR, "&&");

  static const TokenType AMPERSAND_EQ = const TokenType.con2('AMPERSAND_EQ', 12, TokenClass.ASSIGNMENT_OPERATOR, "&=");

  static const TokenType AT = const TokenType.con2('AT', 13, TokenClass.NO_CLASS, "@");

  static const TokenType BANG = const TokenType.con2('BANG', 14, TokenClass.UNARY_PREFIX_OPERATOR, "!");

  static const TokenType BANG_EQ = const TokenType.con2('BANG_EQ', 15, TokenClass.EQUALITY_OPERATOR, "!=");

  static const TokenType BAR = const TokenType.con2('BAR', 16, TokenClass.BITWISE_OR_OPERATOR, "|");

  static const TokenType BAR_BAR = const TokenType.con2('BAR_BAR', 17, TokenClass.LOGICAL_OR_OPERATOR, "||");

  static const TokenType BAR_EQ = const TokenType.con2('BAR_EQ', 18, TokenClass.ASSIGNMENT_OPERATOR, "|=");

  static const TokenType COLON = const TokenType.con2('COLON', 19, TokenClass.NO_CLASS, ":");

  static const TokenType COMMA = const TokenType.con2('COMMA', 20, TokenClass.NO_CLASS, ",");

  static const TokenType CARET = const TokenType.con2('CARET', 21, TokenClass.BITWISE_XOR_OPERATOR, "^");

  static const TokenType CARET_EQ = const TokenType.con2('CARET_EQ', 22, TokenClass.ASSIGNMENT_OPERATOR, "^=");

  static const TokenType CLOSE_CURLY_BRACKET = const TokenType.con2('CLOSE_CURLY_BRACKET', 23, TokenClass.NO_CLASS, "}");

  static const TokenType CLOSE_PAREN = const TokenType.con2('CLOSE_PAREN', 24, TokenClass.NO_CLASS, ")");

  static const TokenType CLOSE_SQUARE_BRACKET = const TokenType.con2('CLOSE_SQUARE_BRACKET', 25, TokenClass.NO_CLASS, "]");

  static const TokenType EQ = const TokenType.con2('EQ', 26, TokenClass.ASSIGNMENT_OPERATOR, "=");

  static const TokenType EQ_EQ = const TokenType.con2('EQ_EQ', 27, TokenClass.EQUALITY_OPERATOR, "==");

  static const TokenType FUNCTION = const TokenType.con2('FUNCTION', 28, TokenClass.NO_CLASS, "=>");

  static const TokenType GT = const TokenType.con2('GT', 29, TokenClass.RELATIONAL_OPERATOR, ">");

  static const TokenType GT_EQ = const TokenType.con2('GT_EQ', 30, TokenClass.RELATIONAL_OPERATOR, ">=");

  static const TokenType GT_GT = const TokenType.con2('GT_GT', 31, TokenClass.SHIFT_OPERATOR, ">>");

  static const TokenType GT_GT_EQ = const TokenType.con2('GT_GT_EQ', 32, TokenClass.ASSIGNMENT_OPERATOR, ">>=");

  static const TokenType HASH = const TokenType.con2('HASH', 33, TokenClass.NO_CLASS, "#");

  static const TokenType INDEX = const TokenType.con2('INDEX', 34, TokenClass.UNARY_POSTFIX_OPERATOR, "[]");

  static const TokenType INDEX_EQ = const TokenType.con2('INDEX_EQ', 35, TokenClass.UNARY_POSTFIX_OPERATOR, "[]=");

  static const TokenType IS = const TokenType.con2('IS', 36, TokenClass.RELATIONAL_OPERATOR, "is");

  static const TokenType LT = const TokenType.con2('LT', 37, TokenClass.RELATIONAL_OPERATOR, "<");

  static const TokenType LT_EQ = const TokenType.con2('LT_EQ', 38, TokenClass.RELATIONAL_OPERATOR, "<=");

  static const TokenType LT_LT = const TokenType.con2('LT_LT', 39, TokenClass.SHIFT_OPERATOR, "<<");

  static const TokenType LT_LT_EQ = const TokenType.con2('LT_LT_EQ', 40, TokenClass.ASSIGNMENT_OPERATOR, "<<=");

  static const TokenType MINUS = const TokenType.con2('MINUS', 41, TokenClass.ADDITIVE_OPERATOR, "-");

  static const TokenType MINUS_EQ = const TokenType.con2('MINUS_EQ', 42, TokenClass.ASSIGNMENT_OPERATOR, "-=");

  static const TokenType MINUS_MINUS = const TokenType.con2('MINUS_MINUS', 43, TokenClass.UNARY_PREFIX_OPERATOR, "--");

  static const TokenType OPEN_CURLY_BRACKET = const TokenType.con2('OPEN_CURLY_BRACKET', 44, TokenClass.NO_CLASS, "{");

  static const TokenType OPEN_PAREN = const TokenType.con2('OPEN_PAREN', 45, TokenClass.UNARY_POSTFIX_OPERATOR, "(");

  static const TokenType OPEN_SQUARE_BRACKET = const TokenType.con2('OPEN_SQUARE_BRACKET', 46, TokenClass.UNARY_POSTFIX_OPERATOR, "[");

  static const TokenType PERCENT = const TokenType.con2('PERCENT', 47, TokenClass.MULTIPLICATIVE_OPERATOR, "%");

  static const TokenType PERCENT_EQ = const TokenType.con2('PERCENT_EQ', 48, TokenClass.ASSIGNMENT_OPERATOR, "%=");

  static const TokenType PERIOD = const TokenType.con2('PERIOD', 49, TokenClass.UNARY_POSTFIX_OPERATOR, ".");

  static const TokenType PERIOD_PERIOD = const TokenType.con2('PERIOD_PERIOD', 50, TokenClass.CASCADE_OPERATOR, "..");

  static const TokenType PLUS = const TokenType.con2('PLUS', 51, TokenClass.ADDITIVE_OPERATOR, "+");

  static const TokenType PLUS_EQ = const TokenType.con2('PLUS_EQ', 52, TokenClass.ASSIGNMENT_OPERATOR, "+=");

  static const TokenType PLUS_PLUS = const TokenType.con2('PLUS_PLUS', 53, TokenClass.UNARY_PREFIX_OPERATOR, "++");

  static const TokenType QUESTION = const TokenType.con2('QUESTION', 54, TokenClass.CONDITIONAL_OPERATOR, "?");

  static const TokenType SEMICOLON = const TokenType.con2('SEMICOLON', 55, TokenClass.NO_CLASS, ";");

  static const TokenType SLASH = const TokenType.con2('SLASH', 56, TokenClass.MULTIPLICATIVE_OPERATOR, "/");

  static const TokenType SLASH_EQ = const TokenType.con2('SLASH_EQ', 57, TokenClass.ASSIGNMENT_OPERATOR, "/=");

  static const TokenType STAR = const TokenType.con2('STAR', 58, TokenClass.MULTIPLICATIVE_OPERATOR, "*");

  static const TokenType STAR_EQ = const TokenType.con2('STAR_EQ', 59, TokenClass.ASSIGNMENT_OPERATOR, "*=");

  static const TokenType STRING_INTERPOLATION_EXPRESSION = const TokenType.con2('STRING_INTERPOLATION_EXPRESSION', 60, TokenClass.NO_CLASS, "\${");

  static const TokenType STRING_INTERPOLATION_IDENTIFIER = const TokenType.con2('STRING_INTERPOLATION_IDENTIFIER', 61, TokenClass.NO_CLASS, "\$");

  static const TokenType TILDE = const TokenType.con2('TILDE', 62, TokenClass.UNARY_PREFIX_OPERATOR, "~");

  static const TokenType TILDE_SLASH = const TokenType.con2('TILDE_SLASH', 63, TokenClass.MULTIPLICATIVE_OPERATOR, "~/");

  static const TokenType TILDE_SLASH_EQ = const TokenType.con2('TILDE_SLASH_EQ', 64, TokenClass.ASSIGNMENT_OPERATOR, "~/=");

  static const TokenType BACKPING = const TokenType.con2('BACKPING', 65, TokenClass.NO_CLASS, "`");

  static const TokenType BACKSLASH = const TokenType.con2('BACKSLASH', 66, TokenClass.NO_CLASS, "\\");

  static const TokenType PERIOD_PERIOD_PERIOD = const TokenType.con2('PERIOD_PERIOD_PERIOD', 67, TokenClass.NO_CLASS, "...");

  /**
   * A list of all of the token types, in ordinal order.
   */
  static const List<TokenType> values = const [
      EOF,
      DOUBLE,
      HEXADECIMAL,
      IDENTIFIER,
      INT,
      KEYWORD,
      MULTI_LINE_COMMENT,
      SCRIPT_TAG,
      SINGLE_LINE_COMMENT,
      STRING,
      AMPERSAND,
      AMPERSAND_AMPERSAND,
      AMPERSAND_EQ,
      AT,
      BANG,
      BANG_EQ,
      BAR,
      BAR_BAR,
      BAR_EQ,
      COLON,
      COMMA,
      CARET,
      CARET_EQ,
      CLOSE_CURLY_BRACKET,
      CLOSE_PAREN,
      CLOSE_SQUARE_BRACKET,
      EQ,
      EQ_EQ,
      FUNCTION,
      GT,
      GT_EQ,
      GT_GT,
      GT_GT_EQ,
      HASH,
      INDEX,
      INDEX_EQ,
      IS,
      LT,
      LT_EQ,
      LT_LT,
      LT_LT_EQ,
      MINUS,
      MINUS_EQ,
      MINUS_MINUS,
      OPEN_CURLY_BRACKET,
      OPEN_PAREN,
      OPEN_SQUARE_BRACKET,
      PERCENT,
      PERCENT_EQ,
      PERIOD,
      PERIOD_PERIOD,
      PLUS,
      PLUS_EQ,
      PLUS_PLUS,
      QUESTION,
      SEMICOLON,
      SLASH,
      SLASH_EQ,
      STAR,
      STAR_EQ,
      STRING_INTERPOLATION_EXPRESSION,
      STRING_INTERPOLATION_IDENTIFIER,
      TILDE,
      TILDE_SLASH,
      TILDE_SLASH_EQ,
      BACKPING,
      BACKSLASH,
      PERIOD_PERIOD_PERIOD];

  /**
   * The class of the token.
   */
  final TokenClass _tokenClass;

  /**
   * The lexeme that defines this type of token, or `null` if there is more than one possible
   * lexeme for this type of token.
   */
  final String lexeme;

  /**
   * Initialize a token type with no fixed lexeme and no token class.
   */
  const TokenType.con1(String name, int ordinal) : this.con2(name, ordinal, TokenClass.NO_CLASS, null);

  /**
   * Initialize a token type with the given token class and fixed lexeme.
   */
  const TokenType.con2(String name, int ordinal, this._tokenClass, this.lexeme) : super(name, ordinal);

  /**
   * Return the precedence of the token, or `0` if the token does not represent an operator.
   *
   * @return the precedence of the token
   */
  int get precedence => _tokenClass.precedence;

  /**
   * Return `true` if this type of token represents an additive operator.
   *
   * @return `true` if this type of token represents an additive operator
   */
  bool get isAdditiveOperator => _tokenClass == TokenClass.ADDITIVE_OPERATOR;

  /**
   * Return `true` if this type of token represents an assignment operator.
   *
   * @return `true` if this type of token represents an assignment operator
   */
  bool get isAssignmentOperator => _tokenClass == TokenClass.ASSIGNMENT_OPERATOR;

  /**
   * Return `true` if this type of token represents an associative operator. An associative
   * operator is an operator for which the following equality is true:
   * `(a * b) * c == a * (b * c)`. In other words, if the result of applying the operator to
   * multiple operands does not depend on the order in which those applications occur.
   *
   * Note: This method considers the logical-and and logical-or operators to be associative, even
   * though the order in which the application of those operators can have an effect because
   * evaluation of the right-hand operand is conditional.
   *
   * @return `true` if this type of token represents an associative operator
   */
  bool get isAssociativeOperator => this == AMPERSAND || this == AMPERSAND_AMPERSAND || this == BAR || this == BAR_BAR || this == CARET || this == PLUS || this == STAR;

  /**
   * Return `true` if this type of token represents an equality operator.
   *
   * @return `true` if this type of token represents an equality operator
   */
  bool get isEqualityOperator => _tokenClass == TokenClass.EQUALITY_OPERATOR;

  /**
   * Return `true` if this type of token represents an increment operator.
   *
   * NOTE(review): uses identical() rather than ==, relying on the const string
   * lexemes above being canonicalized; verify if lexemes ever become non-const.
   *
   * @return `true` if this type of token represents an increment operator
   */
  bool get isIncrementOperator => identical(lexeme, "++") || identical(lexeme, "--");

  /**
   * Return `true` if this type of token represents a multiplicative operator.
   *
   * @return `true` if this type of token represents a multiplicative operator
   */
  bool get isMultiplicativeOperator => _tokenClass == TokenClass.MULTIPLICATIVE_OPERATOR;

  /**
   * Return `true` if this token type represents an operator.
   *
   * @return `true` if this token type represents an operator
   */
  bool get isOperator => _tokenClass != TokenClass.NO_CLASS && this != OPEN_PAREN && this != OPEN_SQUARE_BRACKET && this != PERIOD;

  /**
   * Return `true` if this type of token represents a relational operator.
   *
   * @return `true` if this type of token represents a relational operator
   */
  bool get isRelationalOperator => _tokenClass == TokenClass.RELATIONAL_OPERATOR;

  /**
   * Return `true` if this type of token represents a shift operator.
   *
   * @return `true` if this type of token represents a shift operator
   */
  bool get isShiftOperator => _tokenClass == TokenClass.SHIFT_OPERATOR;

  /**
   * Return `true` if this type of token represents a unary postfix operator.
   *
   * @return `true` if this type of token represents a unary postfix operator
   */
  bool get isUnaryPostfixOperator => _tokenClass == TokenClass.UNARY_POSTFIX_OPERATOR;

  /**
   * Return `true` if this type of token represents a unary prefix operator.
   *
   * @return `true` if this type of token represents a unary prefix operator
   */
  bool get isUnaryPrefixOperator => _tokenClass == TokenClass.UNARY_PREFIX_OPERATOR;

  /**
   * Return `true` if this token type represents an operator that can be defined by users.
   *
   * NOTE(review): like [isIncrementOperator], relies on const string
   * canonicalization so that identical() matches the const lexemes above.
   *
   * @return `true` if this token type represents an operator that can be defined by users
   */
  bool get isUserDefinableOperator => identical(lexeme, "==") || identical(lexeme, "~") || identical(lexeme, "[]") || identical(lexeme, "[]=") || identical(lexeme, "*") || identical(lexeme, "/") || identical(lexeme, "%") || identical(lexeme, "~/") || identical(lexeme, "+") || identical(lexeme, "-") || identical(lexeme, "<<") || identical(lexeme, ">>") || identical(lexeme, ">=") || identical(lexeme, ">") || identical(lexeme, "<=") || identical(lexeme, "<") || identical(lexeme, "&") || identical(lexeme, "^") || identical(lexeme, "|");
}
2671
/**
 * The singleton token type used for the end-of-file marker; prints as "-eof-"
 * instead of its (empty) lexeme.
 */
class TokenType_EOF extends TokenType {
  /**
   * Initialize the end-of-file token type with the given class and lexeme.
   */
  const TokenType_EOF(String name, int ordinal, TokenClass tokenClass, String lexeme) : super.con2(name, ordinal, tokenClass, lexeme);

  @override
  String toString() => "-eof-";
}
2678
/**
 * Instances of the class `TokenWithComment` represent a normal token that is preceded by
 * comments.
 */
class TokenWithComment extends Token {
  /**
   * The first comment in the list of comments that precede this token.
   */
  final Token _precedingComment;

  /**
   * Initialize a newly created token to have the given type and offset and to be preceded by the
   * comments reachable from the given comment.
   *
   * @param type the type of the token
   * @param offset the offset from the beginning of the file to the first character in the token
   * @param precedingComment the first comment in the list of comments that precede this token
   */
  TokenWithComment(TokenType type, int offset, this._precedingComment) : super(type, offset);

  @override
  Token get precedingComments => _precedingComment;

  // Copy the preceding comment tokens as well, so the copy shares no state
  // with the original; this mirrors [StringTokenWithComment.copy], which the
  // original implementation was inconsistent with.
  @override
  Token copy() => new TokenWithComment(type, offset, copyComments(_precedingComment));

  // Shift the preceding comment tokens along with this token, consistent with
  // [StringTokenWithComment.applyDelta].
  @override
  void applyDelta(int delta) {
    super.applyDelta(delta);
    Token token = _precedingComment;
    while (token != null) {
      token.applyDelta(delta);
      token = token.next;
    }
  }
}
OLDNEW
« no previous file with comments | « observatory_pub_packages/analyzer/src/generated/parser.dart ('k') | observatory_pub_packages/analyzer/src/generated/sdk.dart » ('j') | no next file with comments »

Powered by Google App Engine
This is Rietveld 408576698