Chromium Code Reviews
Side by Side Diff: pkg/analyzer-experimental/lib/src/generated/scanner.dart

Issue 12838003: Rename analyzer-experimental to analyzer_experimental. (Closed) Base URL: https://dart.googlecode.com/svn/branches/bleeding_edge/dart
Patch Set: Created 7 years, 9 months ago
1 // This code was auto-generated, is not intended to be edited, and is subject to
2 // significant change. Please see the README file for more information.
3
4 library engine.scanner;
5
6 import 'dart:collection';
7 import 'java_core.dart';
8 import 'source.dart';
9 import 'error.dart';
10 import 'instrumentation.dart';
11
12 /**
13 * Instances of the abstract class {@code KeywordState} represent a state in a state machine used to
14 * scan keywords.
15 * @coverage dart.engine.parser
16 */
17 class KeywordState {
18 /**
19 * An empty transition table used by leaf states.
20 */
21 static List<KeywordState> _EMPTY_TABLE = new List<KeywordState>(26);
22 /**
23 * The initial state in the state machine.
24 */
25 static KeywordState KEYWORD_STATE = createKeywordStateTable();
26 /**
27 * Create the next state in the state machine where we have already recognized the subset of
28 * strings in the given array of strings starting at the given offset and having the given length.
29 * All of these strings have a common prefix and the next character is at the given start index.
30 * @param start the index of the character in the strings used to transition to a new state
31 * @param strings an array containing all of the strings that will be recognized by the state
32 * machine
33 * @param offset the offset of the first string in the array that has the prefix that is assumed
34 * to have been recognized by the time we reach the state being built
35 * @param length the number of strings in the array that pass through the state being built
36 * @return the state that was created
37 */
38 static KeywordState computeKeywordStateTable(int start, List<String> strings, int offset, int length12) {
39 List<KeywordState> result = new List<KeywordState>(26);
40 assert(length12 != 0);
41 int chunk = 0x0;
42 int chunkStart = -1;
43 bool isLeaf = false;
44 for (int i = offset; i < offset + length12; i++) {
45 if (strings[i].length == start) {
46 isLeaf = true;
47 }
48 if (strings[i].length > start) {
49 int c = strings[i].codeUnitAt(start);
50 if (chunk != c) {
51 if (chunkStart != -1) {
52 result[chunk - 0x61] = computeKeywordStateTable(start + 1, strings, chunkStart, i - chunkStart);
53 }
54 chunkStart = i;
55 chunk = c;
56 }
57 }
58 }
59 if (chunkStart != -1) {
60 assert(result[chunk - 0x61] == null);
61 result[chunk - 0x61] = computeKeywordStateTable(start + 1, strings, chunkStart, offset + length12 - chunkStart);
62 } else {
63 assert(length12 == 1);
64 return new KeywordState(_EMPTY_TABLE, strings[offset]);
65 }
66 if (isLeaf) {
67 return new KeywordState(result, strings[offset]);
68 } else {
69 return new KeywordState(result, null);
70 }
71 }
72 /**
73 * Create the initial state in the state machine.
74 * @return the state that was created
75 */
76 static KeywordState createKeywordStateTable() {
77 List<Keyword> values2 = Keyword.values;
78 List<String> strings = new List<String>(values2.length);
79 for (int i = 0; i < values2.length; i++) {
80 strings[i] = values2[i].syntax;
81 }
82 strings.sort();
83 return computeKeywordStateTable(0, strings, 0, strings.length);
84 }
85 /**
86 * A table mapping characters to the states to which those characters will transition. (The index
87 * into the array is the offset from the character {@code 'a'} to the transitioning character.)
88 */
89 List<KeywordState> _table;
90 /**
91 * The keyword that is recognized by this state, or {@code null} if this state is not a terminal
92 * state.
93 */
94 Keyword _keyword2;
95 /**
96 * Initialize a newly created state to have the given transitions and to recognize the keyword
97 * with the given syntax.
98 * @param table a table mapping characters to the states to which those characters will transition
99 * @param syntax the syntax of the keyword that is recognized by the state
100 */
101 KeywordState(List<KeywordState> table, String syntax) {
102 this._table = table;
103 this._keyword2 = (syntax == null) ? null : Keyword.keywords[syntax];
104 }
105 /**
106 * Return the keyword that was recognized by this state, or {@code null} if this state does not
107 * recognize a keyword.
108 * @return the keyword that was matched by reaching this state
109 */
110 Keyword keyword() => _keyword2;
111 /**
112 * Return the state that follows this state on a transition of the given character, or {@code null} if there is no valid state reachable from this state with such a transition.
113 * @param c the character used to transition from this state to another state
114 * @return the state that follows this state on a transition of the given character
115 */
116 KeywordState next(int c) => _table[c - 0x61];
117 }
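To make the table-driven recognition above concrete, here is a minimal sketch of walking the state machine by hand; lookupKeyword is a hypothetical helper, not part of the reviewed file, and it assumes a purely lowercase lexeme because next() only indexes transitions for 'a' through 'z'.

Keyword lookupKeyword(String lexeme) {
  KeywordState state = KeywordState.KEYWORD_STATE;
  for (int i = 0; i < lexeme.length && state != null; i++) {
    // next() yields null when no keyword continues with this character.
    state = state.next(lexeme.codeUnitAt(i));
  }
  // keyword() is non-null only if the state reached is a terminal state.
  return state == null ? null : state.keyword();
}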
118 /**
119 * The enumeration {@code ScannerErrorCode} defines the error codes used for errors detected by the
120 * scanner.
121 * @coverage dart.engine.parser
122 */
123 class ScannerErrorCode implements ErrorCode {
124 static final ScannerErrorCode ILLEGAL_CHARACTER = new ScannerErrorCode('ILLEGAL_CHARACTER', 0, "Illegal character %x");
125 static final ScannerErrorCode MISSING_DIGIT = new ScannerErrorCode('MISSING_DIGIT', 1, "Decimal digit expected");
126 static final ScannerErrorCode MISSING_HEX_DIGIT = new ScannerErrorCode('MISSING_HEX_DIGIT', 2, "Hexidecimal digit expected");
127 static final ScannerErrorCode MISSING_QUOTE = new ScannerErrorCode('MISSING_QUOTE', 3, "Expected quote (' or \")");
128 static final ScannerErrorCode UNTERMINATED_MULTI_LINE_COMMENT = new ScannerErrorCode('UNTERMINATED_MULTI_LINE_COMMENT', 4, "Unterminated multi-line comment");
129 static final ScannerErrorCode UNTERMINATED_STRING_LITERAL = new ScannerErrorCode('UNTERMINATED_STRING_LITERAL', 5, "Unterminated string literal");
130 static final List<ScannerErrorCode> values = [ILLEGAL_CHARACTER, MISSING_DIGIT, MISSING_HEX_DIGIT, MISSING_QUOTE, UNTERMINATED_MULTI_LINE_COMMENT, UNTERMINATED_STRING_LITERAL];
131 final String __name;
132 final int __ordinal;
133 int get ordinal => __ordinal;
134 /**
135 * The message template used to create the message to be displayed for this error.
136 */
137 String _message;
138 /**
139 * Initialize a newly created error code to have the given message.
140 * @param message the message template used to create the message to be displayed for this error
141 */
142 ScannerErrorCode(this.__name, this.__ordinal, String message) {
143 this._message = message;
144 }
145 ErrorSeverity get errorSeverity => ErrorSeverity.ERROR;
146 String get message => _message;
147 ErrorType get type => ErrorType.SYNTACTIC_ERROR;
148 bool needsRecompilation() => true;
149 String toString() => __name;
150 }
151 /**
152 * Instances of the class {@code StringTokenWithComment} represent a string token that is preceded by
153 * comments.
154 * @coverage dart.engine.parser
155 */
156 class StringTokenWithComment extends StringToken {
157 /**
158 * The first comment in the list of comments that precede this token.
159 */
160 Token _precedingComment;
161 /**
162 * Initialize a newly created token to have the given type and offset and to be preceded by the
163 * comments reachable from the given comment.
164 * @param type the type of the token
165 * @param offset the offset from the beginning of the file to the first character in the token
166 * @param precedingComment the first comment in the list of comments that precede this token
167 */
168 StringTokenWithComment(TokenType type, String value, int offset, Token precedingComment) : super(type, value, offset) {
169 this._precedingComment = precedingComment;
170 }
171 Token get precedingComments => _precedingComment;
172 }
173 /**
174 * The enumeration {@code Keyword} defines the keywords in the Dart programming language.
175 * @coverage dart.engine.parser
176 */
177 class Keyword {
178 static final Keyword ASSERT = new Keyword.con1('ASSERT', 0, "assert");
179 static final Keyword BREAK = new Keyword.con1('BREAK', 1, "break");
180 static final Keyword CASE = new Keyword.con1('CASE', 2, "case");
181 static final Keyword CATCH = new Keyword.con1('CATCH', 3, "catch");
182 static final Keyword CLASS = new Keyword.con1('CLASS', 4, "class");
183 static final Keyword CONST = new Keyword.con1('CONST', 5, "const");
184 static final Keyword CONTINUE = new Keyword.con1('CONTINUE', 6, "continue");
185 static final Keyword DEFAULT = new Keyword.con1('DEFAULT', 7, "default");
186 static final Keyword DO = new Keyword.con1('DO', 8, "do");
187 static final Keyword ELSE = new Keyword.con1('ELSE', 9, "else");
188 static final Keyword EXTENDS = new Keyword.con1('EXTENDS', 10, "extends");
189 static final Keyword FALSE = new Keyword.con1('FALSE', 11, "false");
190 static final Keyword FINAL = new Keyword.con1('FINAL', 12, "final");
191 static final Keyword FINALLY = new Keyword.con1('FINALLY', 13, "finally");
192 static final Keyword FOR = new Keyword.con1('FOR', 14, "for");
193 static final Keyword IF = new Keyword.con1('IF', 15, "if");
194 static final Keyword IN = new Keyword.con1('IN', 16, "in");
195 static final Keyword IS = new Keyword.con1('IS', 17, "is");
196 static final Keyword NEW = new Keyword.con1('NEW', 18, "new");
197 static final Keyword NULL = new Keyword.con1('NULL', 19, "null");
198 static final Keyword RETURN = new Keyword.con1('RETURN', 20, "return");
199 static final Keyword SUPER = new Keyword.con1('SUPER', 21, "super");
200 static final Keyword SWITCH = new Keyword.con1('SWITCH', 22, "switch");
201 static final Keyword THIS = new Keyword.con1('THIS', 23, "this");
202 static final Keyword THROW = new Keyword.con1('THROW', 24, "throw");
203 static final Keyword TRUE = new Keyword.con1('TRUE', 25, "true");
204 static final Keyword TRY = new Keyword.con1('TRY', 26, "try");
205 static final Keyword VAR = new Keyword.con1('VAR', 27, "var");
206 static final Keyword VOID = new Keyword.con1('VOID', 28, "void");
207 static final Keyword WHILE = new Keyword.con1('WHILE', 29, "while");
208 static final Keyword WITH = new Keyword.con1('WITH', 30, "with");
209 static final Keyword ABSTRACT = new Keyword.con2('ABSTRACT', 31, "abstract", true);
210 static final Keyword AS = new Keyword.con2('AS', 32, "as", true);
211 static final Keyword DYNAMIC = new Keyword.con2('DYNAMIC', 33, "dynamic", true);
212 static final Keyword EXPORT = new Keyword.con2('EXPORT', 34, "export", true);
213 static final Keyword EXTERNAL = new Keyword.con2('EXTERNAL', 35, "external", true);
214 static final Keyword FACTORY = new Keyword.con2('FACTORY', 36, "factory", true);
215 static final Keyword GET = new Keyword.con2('GET', 37, "get", true);
216 static final Keyword IMPLEMENTS = new Keyword.con2('IMPLEMENTS', 38, "implements", true);
217 static final Keyword IMPORT = new Keyword.con2('IMPORT', 39, "import", true);
218 static final Keyword LIBRARY = new Keyword.con2('LIBRARY', 40, "library", true);
219 static final Keyword OPERATOR = new Keyword.con2('OPERATOR', 41, "operator", true);
220 static final Keyword PART = new Keyword.con2('PART', 42, "part", true);
221 static final Keyword SET = new Keyword.con2('SET', 43, "set", true);
222 static final Keyword STATIC = new Keyword.con2('STATIC', 44, "static", true);
223 static final Keyword TYPEDEF = new Keyword.con2('TYPEDEF', 45, "typedef", true);
224 static final List<Keyword> values = [ASSERT, BREAK, CASE, CATCH, CLASS, CONST, CONTINUE, DEFAULT, DO, ELSE, EXTENDS, FALSE, FINAL, FINALLY, FOR, IF, IN, IS, NEW, NULL, RETURN, SUPER, SWITCH, THIS, THROW, TRUE, TRY, VAR, VOID, WHILE, WITH, ABSTRACT, AS, DYNAMIC, EXPORT, EXTERNAL, FACTORY, GET, IMPLEMENTS, IMPORT, LIBRARY, OPERATOR, PART, SET, STATIC, TYPEDEF];
225 String __name;
226 int __ordinal = 0;
227 int get ordinal => __ordinal;
228 /**
229 * The lexeme for the keyword.
230 */
231 String _syntax;
232 /**
233 * A flag indicating whether the keyword is a pseudo-keyword. Pseudo keywords can be used as
234 * identifiers.
235 */
236 bool _isPseudoKeyword2 = false;
237 /**
238 * A table mapping the lexemes of keywords to the corresponding keyword.
239 */
240 static Map<String, Keyword> keywords = createKeywordMap();
241 /**
242 * Create a table mapping the lexemes of keywords to the corresponding keyword.
243 * @return the table that was created
244 */
245 static Map<String, Keyword> createKeywordMap() {
246 LinkedHashMap<String, Keyword> result = new LinkedHashMap<String, Keyword>();
247 for (Keyword keyword in values) {
248 result[keyword._syntax] = keyword;
249 }
250 return result;
251 }
252 /**
253 * Initialize a newly created keyword to have the given syntax. The keyword is not a
254 * pseudo-keyword.
255 * @param syntax the lexeme for the keyword
256 */
257 Keyword.con1(String ___name, int ___ordinal, String syntax) {
258 _jtd_constructor_265_impl(___name, ___ordinal, syntax);
259 }
260 _jtd_constructor_265_impl(String ___name, int ___ordinal, String syntax) {
261 _jtd_constructor_266_impl(___name, ___ordinal, syntax, false);
262 }
263 /**
264 * Initialize a newly created keyword to have the given syntax. The keyword is a pseudo-keyword if
265 * the given flag is {@code true}.
266 * @param syntax the lexeme for the keyword
267 * @param isPseudoKeyword {@code true} if this keyword is a pseudo-keyword
268 */
269 Keyword.con2(String ___name, int ___ordinal, String syntax2, bool isPseudoKeyword) {
270 _jtd_constructor_266_impl(___name, ___ordinal, syntax2, isPseudoKeyword);
271 }
272 _jtd_constructor_266_impl(String ___name, int ___ordinal, String syntax2, bool isPseudoKeyword) {
273 __name = ___name;
274 __ordinal = ___ordinal;
275 this._syntax = syntax2;
276 this._isPseudoKeyword2 = isPseudoKeyword;
277 }
278 /**
279 * Return the lexeme for the keyword.
280 * @return the lexeme for the keyword
281 */
282 String get syntax => _syntax;
283 /**
284 * Return {@code true} if this keyword is a pseudo-keyword. Pseudo keywords can be used as
285 * identifiers.
286 * @return {@code true} if this keyword is a pseudo-keyword
287 */
288 bool isPseudoKeyword() => _isPseudoKeyword2;
289 String toString() => __name;
290 }
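As a quick illustration of how the static keywords map and the pseudo-keyword flag are meant to be consulted (a sketch only; classify is a hypothetical helper, not part of this patch):

String classify(String lexeme) {
  Keyword keyword = Keyword.keywords[lexeme];
  if (keyword == null) {
    return "not a keyword";
  }
  // Pseudo-keywords such as "dynamic" or "import" may still be used as identifiers.
  return keyword.isPseudoKeyword() ? "pseudo-keyword" : "reserved word";
}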
291 /**
292 * The abstract class {@code AbstractScanner} implements a scanner for Dart code. Subclasses are
293 * required to implement the interface used to access the characters being scanned.
294 * <p>
295 * The lexical structure of Dart is ambiguous without knowledge of the context in which a token is
296 * being scanned. For example, without context we cannot determine whether source of the form "<<"
297 * should be scanned as a single left-shift operator or as two left angle brackets. This scanner
298 * does not have any context, so it always resolves such conflicts by scanning the longest possible
299 * token.
300 * @coverage dart.engine.parser
301 */
302 abstract class AbstractScanner {
303 /**
304 * The source being scanned.
305 */
306 Source _source;
307 /**
308 * The error listener that will be informed of any errors that are found during the scan.
309 */
310 AnalysisErrorListener _errorListener;
311 /**
312 * The token pointing to the head of the linked list of tokens.
313 */
314 Token _tokens;
315 /**
316 * The last token that was scanned.
317 */
318 Token _tail;
319 /**
320 * The first token in the list of comment tokens found since the last non-comment token.
321 */
322 Token _firstComment;
323 /**
324 * The last token in the list of comment tokens found since the last non-comment token.
325 */
326 Token _lastComment;
327 /**
328 * The index of the first character of the current token.
329 */
330 int _tokenStart = 0;
331 /**
332 * A list containing the offsets of the first character of each line in the source code.
333 */
334 List<int> _lineStarts = new List<int>();
335 /**
336 * A list, treated something like a stack, of tokens representing the beginning of a matched pair.
337 * It is used to pair the end tokens with the begin tokens.
338 */
339 List<BeginToken> _groupingStack = new List<BeginToken>();
340 /**
341 * A flag indicating whether any unmatched groups were found during the parse.
342 */
343 bool _hasUnmatchedGroups2 = false;
344 /**
345 * A non-breaking space, which is allowed by this scanner as a white-space character.
346 */
347 static int _$NBSP = 160;
348 /**
349 * Initialize a newly created scanner.
350 * @param source the source being scanned
351 * @param errorListener the error listener that will be informed of any errors that are found
352 */
353 AbstractScanner(Source source, AnalysisErrorListener errorListener) {
354 this._source = source;
355 this._errorListener = errorListener;
356 _tokens = new Token(TokenType.EOF, -1);
357 _tokens.setNext(_tokens);
358 _tail = _tokens;
359 _tokenStart = -1;
360 _lineStarts.add(0);
361 }
362 /**
363 * Return an array containing the offsets of the first character of each line in the source code.
364 * @return an array containing the offsets of the first character of each line in the source code
365 */
366 List<int> get lineStarts => _lineStarts;
367 /**
368 * Return the current offset relative to the beginning of the file. Return the initial offset if
369 * the scanner has not yet scanned the source code, and one (1) past the end of the source code if
370 * the source code has been scanned.
371 * @return the current offset of the scanner in the source
372 */
373 int get offset;
374 /**
375 * Return {@code true} if any unmatched groups were found during the parse.
376 * @return {@code true} if any unmatched groups were found during the parse
377 */
378 bool hasUnmatchedGroups() => _hasUnmatchedGroups2;
379 /**
380 * Scan the source code to produce a list of tokens representing the source.
381 * @return the first token in the list of tokens that were produced
382 */
383 Token tokenize() {
384 int next = advance();
385 while (next != -1) {
386 next = bigSwitch(next);
387 }
388 appendEofToken();
389 return firstToken();
390 }
391 /**
392 * Advance the current position and return the character at the new current position.
393 * @return the character at the new current position
394 */
395 int advance();
396 /**
397 * Return the substring of the source code between the start offset and the modified current
398 * position. The current position is modified by adding the end delta.
399 * @param start the offset to the beginning of the string, relative to the start of the file
400 * @param endDelta the number of characters after the current location to be included in the
401 * string, or the number of characters before the current location to be excluded if the
402 * offset is negative
403 * @return the specified substring of the source code
404 */
405 String getString(int start, int endDelta);
406 /**
407 * Return the character at the current position without changing the current position.
408 * @return the character at the current position
409 */
410 int peek();
411 /**
412 * Record the fact that we are at the beginning of a new line in the source.
413 */
414 void recordStartOfLine() {
415 _lineStarts.add(offset);
416 }
417 void appendBeginToken(TokenType type) {
418 BeginToken token;
419 if (_firstComment == null) {
420 token = new BeginToken(type, _tokenStart);
421 } else {
422 token = new BeginTokenWithComment(type, _tokenStart, _firstComment);
423 _firstComment = null;
424 _lastComment = null;
425 }
426 _tail = _tail.setNext(token);
427 _groupingStack.add(token);
428 }
429 void appendCommentToken(TokenType type, String value) {
430 if (_firstComment == null) {
431 _firstComment = new StringToken(type, value, _tokenStart);
432 _lastComment = _firstComment;
433 } else {
434 _lastComment = _lastComment.setNext(new StringToken(type, value, _tokenStart));
435 }
436 }
437 void appendEndToken(TokenType type32, TokenType beginType) {
438 Token token;
439 if (_firstComment == null) {
440 token = new Token(type32, _tokenStart);
441 } else {
442 token = new TokenWithComment(type32, _tokenStart, _firstComment);
443 _firstComment = null;
444 _lastComment = null;
445 }
446 _tail = _tail.setNext(token);
447 int last = _groupingStack.length - 1;
448 if (last >= 0) {
449 BeginToken begin = _groupingStack[last];
450 if (identical(begin.type, beginType)) {
451 begin.endToken = token;
452 _groupingStack.removeAt(last);
453 }
454 }
455 }
456 void appendEofToken() {
457 Token eofToken;
458 if (_firstComment == null) {
459 eofToken = new Token(TokenType.EOF, offset + 1);
460 } else {
461 eofToken = new TokenWithComment(TokenType.EOF, offset + 1, _firstComment);
462 _firstComment = null;
463 _lastComment = null;
464 }
465 eofToken.setNext(eofToken);
466 _tail = _tail.setNext(eofToken);
467 if (!_groupingStack.isEmpty) {
468 _hasUnmatchedGroups2 = true;
469 }
470 }
471 void appendKeywordToken(Keyword keyword) {
472 if (_firstComment == null) {
473 _tail = _tail.setNext(new KeywordToken(keyword, _tokenStart));
474 } else {
475 _tail = _tail.setNext(new KeywordTokenWithComment(keyword, _tokenStart, _firstComment));
476 _firstComment = null;
477 _lastComment = null;
478 }
479 }
480 void appendStringToken(TokenType type, String value) {
481 if (_firstComment == null) {
482 _tail = _tail.setNext(new StringToken(type, value, _tokenStart));
483 } else {
484 _tail = _tail.setNext(new StringTokenWithComment(type, value, _tokenStart, _firstComment));
485 _firstComment = null;
486 _lastComment = null;
487 }
488 }
489 void appendStringToken2(TokenType type, String value, int offset) {
490 if (_firstComment == null) {
491 _tail = _tail.setNext(new StringToken(type, value, _tokenStart + offset));
492 } else {
493 _tail = _tail.setNext(new StringTokenWithComment(type, value, _tokenStart + offset, _firstComment));
494 _firstComment = null;
495 _lastComment = null;
496 }
497 }
498 void appendToken(TokenType type) {
499 if (_firstComment == null) {
500 _tail = _tail.setNext(new Token(type, _tokenStart));
501 } else {
502 _tail = _tail.setNext(new TokenWithComment(type, _tokenStart, _firstComment));
503 _firstComment = null;
504 _lastComment = null;
505 }
506 }
507 void appendToken2(TokenType type, int offset) {
508 if (_firstComment == null) {
509 _tail = _tail.setNext(new Token(type, offset));
510 } else {
511 _tail = _tail.setNext(new TokenWithComment(type, offset, _firstComment));
512 _firstComment = null;
513 _lastComment = null;
514 }
515 }
516 void beginToken() {
517 _tokenStart = offset;
518 }
519 int bigSwitch(int next) {
520 beginToken();
521 if (next == 0xD) {
522 next = advance();
523 if (next == 0xA) {
524 next = advance();
525 }
526 recordStartOfLine();
527 return next;
528 } else if (next == 0xA) {
529 recordStartOfLine();
530 return advance();
531 } else if (next == 0x9 || next == 0x20) {
532 return advance();
533 }
534 if (next == 0x72) {
535 int peek3 = peek();
536 if (peek3 == 0x22 || peek3 == 0x27) {
537 int start = offset;
538 return tokenizeString(advance(), start, true);
539 }
540 }
541 if (0x61 <= next && next <= 0x7A) {
542 return tokenizeKeywordOrIdentifier(next, true);
543 }
544 if ((0x41 <= next && next <= 0x5A) || next == 0x5F || next == 0x24) {
545 return tokenizeIdentifier(next, offset, true);
546 }
547 if (next == 0x3C) {
548 return tokenizeLessThan(next);
549 }
550 if (next == 0x3E) {
551 return tokenizeGreaterThan(next);
552 }
553 if (next == 0x3D) {
554 return tokenizeEquals(next);
555 }
556 if (next == 0x21) {
557 return tokenizeExclamation(next);
558 }
559 if (next == 0x2B) {
560 return tokenizePlus(next);
561 }
562 if (next == 0x2D) {
563 return tokenizeMinus(next);
564 }
565 if (next == 0x2A) {
566 return tokenizeMultiply(next);
567 }
568 if (next == 0x25) {
569 return tokenizePercent(next);
570 }
571 if (next == 0x26) {
572 return tokenizeAmpersand(next);
573 }
574 if (next == 0x7C) {
575 return tokenizeBar(next);
576 }
577 if (next == 0x5E) {
578 return tokenizeCaret(next);
579 }
580 if (next == 0x5B) {
581 return tokenizeOpenSquareBracket(next);
582 }
583 if (next == 0x7E) {
584 return tokenizeTilde(next);
585 }
586 if (next == 0x5C) {
587 appendToken(TokenType.BACKSLASH);
588 return advance();
589 }
590 if (next == 0x23) {
591 return tokenizeTag(next);
592 }
593 if (next == 0x28) {
594 appendBeginToken(TokenType.OPEN_PAREN);
595 return advance();
596 }
597 if (next == 0x29) {
598 appendEndToken(TokenType.CLOSE_PAREN, TokenType.OPEN_PAREN);
599 return advance();
600 }
601 if (next == 0x2C) {
602 appendToken(TokenType.COMMA);
603 return advance();
604 }
605 if (next == 0x3A) {
606 appendToken(TokenType.COLON);
607 return advance();
608 }
609 if (next == 0x3B) {
610 appendToken(TokenType.SEMICOLON);
611 return advance();
612 }
613 if (next == 0x3F) {
614 appendToken(TokenType.QUESTION);
615 return advance();
616 }
617 if (next == 0x5D) {
618 appendEndToken(TokenType.CLOSE_SQUARE_BRACKET, TokenType.OPEN_SQUARE_BRACKET);
619 return advance();
620 }
621 if (next == 0x60) {
622 appendToken(TokenType.BACKPING);
623 return advance();
624 }
625 if (next == 0x7B) {
626 appendBeginToken(TokenType.OPEN_CURLY_BRACKET);
627 return advance();
628 }
629 if (next == 0x7D) {
630 appendEndToken(TokenType.CLOSE_CURLY_BRACKET, TokenType.OPEN_CURLY_BRACKET);
631 return advance();
632 }
633 if (next == 0x2F) {
634 return tokenizeSlashOrComment(next);
635 }
636 if (next == 0x40) {
637 appendToken(TokenType.AT);
638 return advance();
639 }
640 if (next == 0x22 || next == 0x27) {
641 return tokenizeString(next, offset, false);
642 }
643 if (next == 0x2E) {
644 return tokenizeDotOrNumber(next);
645 }
646 if (next == 0x30) {
647 return tokenizeHexOrNumber(next);
648 }
649 if (0x31 <= next && next <= 0x39) {
650 return tokenizeNumber(next);
651 }
652 if (next == -1) {
653 return -1;
654 }
655 if (Character.isLetter(next)) {
656 return tokenizeIdentifier(next, offset, true);
657 }
658 if (next == _$NBSP) {
659 return advance();
660 }
661 reportError(ScannerErrorCode.ILLEGAL_CHARACTER, [next]);
662 return advance();
663 }
664 /**
665 * Return the beginning token corresponding to a closing brace that was found while scanning
666 * inside a string interpolation expression. Tokens that cannot be matched with the closing brace
667 * will be dropped from the stack.
668 * @return the token to be paired with the closing brace
669 */
670 BeginToken findTokenMatchingClosingBraceInInterpolationExpression() {
671 int last = _groupingStack.length - 1;
672 while (last >= 0) {
673 BeginToken begin = _groupingStack[last];
674 if (identical(begin.type, TokenType.OPEN_CURLY_BRACKET) || identical(begin.type, TokenType.STRING_INTERPOLATION_EXPRESSION)) {
675 return begin;
676 }
677 _hasUnmatchedGroups2 = true;
678 _groupingStack.removeAt(last);
679 last--;
680 }
681 return null;
682 }
683 Token firstToken() => _tokens.next;
684 /**
685 * Return the source being scanned.
686 * @return the source being scanned
687 */
688 Source get source => _source;
689 /**
690 * Report an error at the current offset.
691 * @param errorCode the error code indicating the nature of the error
692 * @param arguments any arguments needed to complete the error message
693 */
694 void reportError(ScannerErrorCode errorCode, List<Object> arguments) {
695 _errorListener.onError(new AnalysisError.con2(source, offset, 1, errorCode, [arguments]));
696 }
697 int select(int choice, TokenType yesType, TokenType noType) {
698 int next = advance();
699 if (next == choice) {
700 appendToken(yesType);
701 return advance();
702 } else {
703 appendToken(noType);
704 return next;
705 }
706 }
707 int select2(int choice, TokenType yesType, TokenType noType, int offset) {
708 int next = advance();
709 if (next == choice) {
710 appendToken2(yesType, offset);
711 return advance();
712 } else {
713 appendToken2(noType, offset);
714 return next;
715 }
716 }
717 int tokenizeAmpersand(int next) {
718 next = advance();
719 if (next == 0x26) {
720 appendToken(TokenType.AMPERSAND_AMPERSAND);
721 return advance();
722 } else if (next == 0x3D) {
723 appendToken(TokenType.AMPERSAND_EQ);
724 return advance();
725 } else {
726 appendToken(TokenType.AMPERSAND);
727 return next;
728 }
729 }
730 int tokenizeBar(int next) {
731 next = advance();
732 if (next == 0x7C) {
733 appendToken(TokenType.BAR_BAR);
734 return advance();
735 } else if (next == 0x3D) {
736 appendToken(TokenType.BAR_EQ);
737 return advance();
738 } else {
739 appendToken(TokenType.BAR);
740 return next;
741 }
742 }
743 int tokenizeCaret(int next) => select(0x3D, TokenType.CARET_EQ, TokenType.CARET);
744 int tokenizeDotOrNumber(int next) {
745 int start = offset;
746 next = advance();
747 if ((0x30 <= next && next <= 0x39)) {
748 return tokenizeFractionPart(next, start);
749 } else if (0x2E == next) {
750 return select(0x2E, TokenType.PERIOD_PERIOD_PERIOD, TokenType.PERIOD_PERIOD);
751 } else {
752 appendToken(TokenType.PERIOD);
753 return next;
754 }
755 }
756 int tokenizeEquals(int next) {
757 next = advance();
758 if (next == 0x3D) {
759 appendToken(TokenType.EQ_EQ);
760 return advance();
761 } else if (next == 0x3E) {
762 appendToken(TokenType.FUNCTION);
763 return advance();
764 }
765 appendToken(TokenType.EQ);
766 return next;
767 }
768 int tokenizeExclamation(int next) {
769 next = advance();
770 if (next == 0x3D) {
771 appendToken(TokenType.BANG_EQ);
772 return advance();
773 }
774 appendToken(TokenType.BANG);
775 return next;
776 }
777 int tokenizeExponent(int next) {
778 if (next == 0x2B || next == 0x2D) {
779 next = advance();
780 }
781 bool hasDigits = false;
782 while (true) {
783 if (0x30 <= next && next <= 0x39) {
784 hasDigits = true;
785 } else {
786 if (!hasDigits) {
787 reportError(ScannerErrorCode.MISSING_DIGIT, []);
788 }
789 return next;
790 }
791 next = advance();
792 }
793 }
794 int tokenizeFractionPart(int next, int start) {
795 bool done = false;
796 bool hasDigit = false;
797 LOOP: while (!done) {
798 if (0x30 <= next && next <= 0x39) {
799 hasDigit = true;
800 } else if (0x65 == next || 0x45 == next) {
801 hasDigit = true;
802 next = tokenizeExponent(advance());
803 done = true;
804 continue LOOP;
805 } else {
806 done = true;
807 continue LOOP;
808 }
809 next = advance();
810 }
811 if (!hasDigit) {
812 appendStringToken(TokenType.INT, getString(start, -2));
813 if (0x2E == next) {
814 return select2(0x2E, TokenType.PERIOD_PERIOD_PERIOD, TokenType.PERIOD_PERIOD, offset - 1);
815 }
816 appendToken2(TokenType.PERIOD, offset - 1);
817 return bigSwitch(next);
818 }
819 if (next == 0x64 || next == 0x44) {
820 next = advance();
821 }
822 appendStringToken(TokenType.DOUBLE, getString(start, next < 0 ? 0 : -1));
823 return next;
824 }
825 int tokenizeGreaterThan(int next) {
826 next = advance();
827 if (0x3D == next) {
828 appendToken(TokenType.GT_EQ);
829 return advance();
830 } else if (0x3E == next) {
831 next = advance();
832 if (0x3D == next) {
833 appendToken(TokenType.GT_GT_EQ);
834 return advance();
835 } else {
836 appendToken(TokenType.GT_GT);
837 return next;
838 }
839 } else {
840 appendToken(TokenType.GT);
841 return next;
842 }
843 }
844 int tokenizeHex(int next) {
845 int start = offset - 1;
846 bool hasDigits = false;
847 while (true) {
848 next = advance();
849 if ((0x30 <= next && next <= 0x39) || (0x41 <= next && next <= 0x46) || (0x61 <= next && next <= 0x66)) {
850 hasDigits = true;
851 } else {
852 if (!hasDigits) {
853 reportError(ScannerErrorCode.MISSING_HEX_DIGIT, []);
854 }
855 appendStringToken(TokenType.HEXADECIMAL, getString(start, next < 0 ? 0 : -1));
856 return next;
857 }
858 }
859 }
860 int tokenizeHexOrNumber(int next) {
861 int x = peek();
862 if (x == 0x78 || x == 0x58) {
863 advance();
864 return tokenizeHex(x);
865 }
866 return tokenizeNumber(next);
867 }
868 int tokenizeIdentifier(int next, int start, bool allowDollar) {
869 while ((0x61 <= next && next <= 0x7A) || (0x41 <= next && next <= 0x5A) || (0x30 <= next && next <= 0x39) || next == 0x5F || (next == 0x24 && allowDollar) || Character.isLetterOrDigit(next)) {
870 next = advance();
871 }
872 appendStringToken(TokenType.IDENTIFIER, getString(start, next < 0 ? 0 : -1));
873 return next;
874 }
875 int tokenizeInterpolatedExpression(int next, int start) {
876 appendBeginToken(TokenType.STRING_INTERPOLATION_EXPRESSION);
877 next = advance();
878 while (next != -1) {
879 if (next == 0x7D) {
880 BeginToken begin = findTokenMatchingClosingBraceInInterpolationExpression();
881 if (begin == null) {
882 beginToken();
883 appendToken(TokenType.CLOSE_CURLY_BRACKET);
884 next = advance();
885 beginToken();
886 return next;
887 } else if (identical(begin.type, TokenType.OPEN_CURLY_BRACKET)) {
888 beginToken();
889 appendEndToken(TokenType.CLOSE_CURLY_BRACKET, TokenType.OPEN_CURLY_BRACKET);
890 next = advance();
891 beginToken();
892 } else if (identical(begin.type, TokenType.STRING_INTERPOLATION_EXPRESSION)) {
893 beginToken();
894 appendEndToken(TokenType.CLOSE_CURLY_BRACKET, TokenType.STRING_INTERPOLATION_EXPRESSION);
895 next = advance();
896 beginToken();
897 return next;
898 }
899 } else {
900 next = bigSwitch(next);
901 }
902 }
903 if (next == -1) {
904 return next;
905 }
906 next = advance();
907 beginToken();
908 return next;
909 }
910 int tokenizeInterpolatedIdentifier(int next, int start) {
911 appendStringToken2(TokenType.STRING_INTERPOLATION_IDENTIFIER, "\$", 0);
912 beginToken();
913 next = tokenizeKeywordOrIdentifier(next, false);
914 beginToken();
915 return next;
916 }
917 int tokenizeKeywordOrIdentifier(int next2, bool allowDollar) {
918 KeywordState state = KeywordState.KEYWORD_STATE;
919 int start = offset;
920 while (state != null && 0x61 <= next2 && next2 <= 0x7A) {
921 state = state.next((next2 as int));
922 next2 = advance();
923 }
924 if (state == null || state.keyword() == null) {
925 return tokenizeIdentifier(next2, start, allowDollar);
926 }
927 if ((0x41 <= next2 && next2 <= 0x5A) || (0x30 <= next2 && next2 <= 0x39) || next2 == 0x5F || next2 == 0x24) {
928 return tokenizeIdentifier(next2, start, allowDollar);
929 } else if (next2 < 128) {
930 appendKeywordToken(state.keyword());
931 return next2;
932 } else {
933 return tokenizeIdentifier(next2, start, allowDollar);
934 }
935 }
936 int tokenizeLessThan(int next) {
937 next = advance();
938 if (0x3D == next) {
939 appendToken(TokenType.LT_EQ);
940 return advance();
941 } else if (0x3C == next) {
942 return select(0x3D, TokenType.LT_LT_EQ, TokenType.LT_LT);
943 } else {
944 appendToken(TokenType.LT);
945 return next;
946 }
947 }
948 int tokenizeMinus(int next) {
949 next = advance();
950 if (next == 0x2D) {
951 appendToken(TokenType.MINUS_MINUS);
952 return advance();
953 } else if (next == 0x3D) {
954 appendToken(TokenType.MINUS_EQ);
955 return advance();
956 } else {
957 appendToken(TokenType.MINUS);
958 return next;
959 }
960 }
961 int tokenizeMultiLineComment(int next) {
962 int nesting = 1;
963 next = advance();
964 while (true) {
965 if (-1 == next) {
966 reportError(ScannerErrorCode.UNTERMINATED_MULTI_LINE_COMMENT, []);
967 appendCommentToken(TokenType.MULTI_LINE_COMMENT, getString(_tokenStart, 0));
968 return next;
969 } else if (0x2A == next) {
970 next = advance();
971 if (0x2F == next) {
972 --nesting;
973 if (0 == nesting) {
974 appendCommentToken(TokenType.MULTI_LINE_COMMENT, getString(_tokenStart, 0));
975 return advance();
976 } else {
977 next = advance();
978 }
979 }
980 } else if (0x2F == next) {
981 next = advance();
982 if (0x2A == next) {
983 next = advance();
984 ++nesting;
985 }
986 } else {
987 next = advance();
988 }
989 }
990 }
991 int tokenizeMultiLineRawString(int quoteChar, int start) {
992 int next = advance();
993 outer: while (next != -1) {
994 while (next != quoteChar) {
995 next = advance();
996 if (next == -1) {
997 break outer;
998 }
999 }
1000 next = advance();
1001 if (next == quoteChar) {
1002 next = advance();
1003 if (next == quoteChar) {
1004 appendStringToken(TokenType.STRING, getString(start, 0));
1005 return advance();
1006 }
1007 }
1008 }
1009 reportError(ScannerErrorCode.UNTERMINATED_STRING_LITERAL, []);
1010 appendStringToken(TokenType.STRING, getString(start, 0));
1011 return advance();
1012 }
1013 int tokenizeMultiLineString(int quoteChar, int start, bool raw) {
1014 if (raw) {
1015 return tokenizeMultiLineRawString(quoteChar, start);
1016 }
1017 int next = advance();
1018 while (next != -1) {
1019 if (next == 0x24) {
1020 appendStringToken(TokenType.STRING, getString(start, -1));
1021 beginToken();
1022 next = tokenizeStringInterpolation(start);
1023 start = offset;
1024 continue;
1025 }
1026 if (next == quoteChar) {
1027 next = advance();
1028 if (next == quoteChar) {
1029 next = advance();
1030 if (next == quoteChar) {
1031 appendStringToken(TokenType.STRING, getString(start, 0));
1032 return advance();
1033 }
1034 }
1035 continue;
1036 }
1037 if (next == 0x5C) {
1038 next = advance();
1039 if (next == -1) {
1040 break;
1041 }
1042 }
1043 next = advance();
1044 }
1045 reportError(ScannerErrorCode.UNTERMINATED_STRING_LITERAL, []);
1046 appendStringToken(TokenType.STRING, getString(start, 0));
1047 return advance();
1048 }
1049 int tokenizeMultiply(int next) => select(0x3D, TokenType.STAR_EQ, TokenType.STAR);
1050 int tokenizeNumber(int next) {
1051 int start = offset;
1052 while (true) {
1053 next = advance();
1054 if (0x30 <= next && next <= 0x39) {
1055 continue;
1056 } else if (next == 0x2E) {
1057 return tokenizeFractionPart(advance(), start);
1058 } else if (next == 0x64 || next == 0x44) {
1059 appendStringToken(TokenType.DOUBLE, getString(start, 0));
1060 return advance();
1061 } else if (next == 0x65 || next == 0x45) {
1062 return tokenizeFractionPart(next, start);
1063 } else {
1064 appendStringToken(TokenType.INT, getString(start, next < 0 ? 0 : -1));
1065 return next;
1066 }
1067 }
1068 }
1069 int tokenizeOpenSquareBracket(int next) {
1070 next = advance();
1071 if (next == 0x5D) {
1072 return select(0x3D, TokenType.INDEX_EQ, TokenType.INDEX);
1073 } else {
1074 appendBeginToken(TokenType.OPEN_SQUARE_BRACKET);
1075 return next;
1076 }
1077 }
1078 int tokenizePercent(int next) => select(0x3D, TokenType.PERCENT_EQ, TokenType.PERCENT);
1079 int tokenizePlus(int next) {
1080 next = advance();
1081 if (0x2B == next) {
1082 appendToken(TokenType.PLUS_PLUS);
1083 return advance();
1084 } else if (0x3D == next) {
1085 appendToken(TokenType.PLUS_EQ);
1086 return advance();
1087 } else {
1088 appendToken(TokenType.PLUS);
1089 return next;
1090 }
1091 }
1092 int tokenizeSingleLineComment(int next) {
1093 while (true) {
1094 next = advance();
1095 if (0xA == next || 0xD == next || -1 == next) {
1096 appendCommentToken(TokenType.SINGLE_LINE_COMMENT, getString(_tokenStart, 0));
1097 return next;
1098 }
1099 }
1100 }
1101 int tokenizeSingleLineRawString(int next, int quoteChar, int start) {
1102 next = advance();
1103 while (next != -1) {
1104 if (next == quoteChar) {
1105 appendStringToken(TokenType.STRING, getString(start, 0));
1106 return advance();
1107 } else if (next == 0xD || next == 0xA) {
1108 reportError(ScannerErrorCode.UNTERMINATED_STRING_LITERAL, []);
1109 appendStringToken(TokenType.STRING, getString(start, 0));
1110 return advance();
1111 }
1112 next = advance();
1113 }
1114 reportError(ScannerErrorCode.UNTERMINATED_STRING_LITERAL, []);
1115 appendStringToken(TokenType.STRING, getString(start, 0));
1116 return advance();
1117 }
1118 int tokenizeSingleLineString(int next, int quoteChar, int start) {
1119 while (next != quoteChar) {
1120 if (next == 0x5C) {
1121 next = advance();
1122 } else if (next == 0x24) {
1123 appendStringToken(TokenType.STRING, getString(start, -1));
1124 beginToken();
1125 next = tokenizeStringInterpolation(start);
1126 start = offset;
1127 continue;
1128 }
1129 if (next <= 0xD && (next == 0xA || next == 0xD || next == -1)) {
1130 reportError(ScannerErrorCode.UNTERMINATED_STRING_LITERAL, []);
1131 appendStringToken(TokenType.STRING, getString(start, 0));
1132 return advance();
1133 }
1134 next = advance();
1135 }
1136 appendStringToken(TokenType.STRING, getString(start, 0));
1137 return advance();
1138 }
1139 int tokenizeSlashOrComment(int next) {
1140 next = advance();
1141 if (0x2A == next) {
1142 return tokenizeMultiLineComment(next);
1143 } else if (0x2F == next) {
1144 return tokenizeSingleLineComment(next);
1145 } else if (0x3D == next) {
1146 appendToken(TokenType.SLASH_EQ);
1147 return advance();
1148 } else {
1149 appendToken(TokenType.SLASH);
1150 return next;
1151 }
1152 }
1153 int tokenizeString(int next, int start, bool raw) {
1154 int quoteChar = next;
1155 next = advance();
1156 if (quoteChar == next) {
1157 next = advance();
1158 if (quoteChar == next) {
1159 return tokenizeMultiLineString(quoteChar, start, raw);
1160 } else {
1161 appendStringToken(TokenType.STRING, getString(start, -1));
1162 return next;
1163 }
1164 }
1165 if (raw) {
1166 return tokenizeSingleLineRawString(next, quoteChar, start);
1167 } else {
1168 return tokenizeSingleLineString(next, quoteChar, start);
1169 }
1170 }
1171 int tokenizeStringInterpolation(int start) {
1172 beginToken();
1173 int next = advance();
1174 if (next == 0x7B) {
1175 return tokenizeInterpolatedExpression(next, start);
1176 } else {
1177 return tokenizeInterpolatedIdentifier(next, start);
1178 }
1179 }
1180 int tokenizeTag(int next) {
1181 if (offset == 0) {
1182 if (peek() == 0x21) {
1183 do {
1184 next = advance();
1185 } while (next != 0xA && next != 0xD && next > 0);
1186 appendStringToken(TokenType.SCRIPT_TAG, getString(_tokenStart, 0));
1187 return next;
1188 }
1189 }
1190 appendToken(TokenType.HASH);
1191 return advance();
1192 }
1193 int tokenizeTilde(int next) {
1194 next = advance();
1195 if (next == 0x2F) {
1196 return select(0x3D, TokenType.TILDE_SLASH_EQ, TokenType.TILDE_SLASH);
1197 } else {
1198 appendToken(TokenType.TILDE);
1199 return next;
1200 }
1201 }
1202 }
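The longest-match behaviour described in the class comment on AbstractScanner can be seen with a short driver. This is illustrative only: it assumes a Source and an AnalysisErrorListener are supplied by the caller and uses the StringScanner subclass defined later in this file.

void showLongestMatch(Source source, AnalysisErrorListener listener) {
  Token first = new StringScanner(source, "a << b", listener).tokenize();
  // "<<" is always scanned as a single LT_LT token, never as two LT tokens.
  print(first.next.type == TokenType.LT_LT); // prints: true
}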
1203 /**
1204 * Instances of the class {@code StringToken} represent a token whose value is independent of its
1205 * type.
1206 * @coverage dart.engine.parser
1207 */
1208 class StringToken extends Token {
1209 /**
1210 * The lexeme represented by this token.
1211 */
1212 String _value2;
1213 /**
1214 * Initialize a newly created token to represent a token of the given type with the given value.
1215 * @param type the type of the token
1216 * @param value the lexeme represented by this token
1217 * @param offset the offset from the beginning of the file to the first character in the token
1218 */
1219 StringToken(TokenType type, String value, int offset) : super(type, offset) {
1220 this._value2 = value;
1221 }
1222 String get lexeme => _value2;
1223 String value() => _value2;
1224 }
1225 /**
1226 * Instances of the class {@code CharBufferScanner} implement a scanner that reads from a character
1227 * buffer. The scanning logic is in the superclass.
1228 * @coverage dart.engine.parser
1229 */
1230 class CharBufferScanner extends AbstractScanner {
1231 /**
1232 * The buffer from which characters will be read.
1233 */
1234 CharBuffer _buffer;
1235 /**
1236 * The number of characters in the buffer.
1237 */
1238 int _bufferLength = 0;
1239 /**
1240 * The index of the last character that was read.
1241 */
1242 int _charOffset = 0;
1243 /**
1244 * Initialize a newly created scanner to scan the characters in the given character buffer.
1245 * @param source the source being scanned
1246 * @param buffer the buffer from which characters will be read
1247 * @param errorListener the error listener that will be informed of any errors that are found
1248 */
1249 CharBufferScanner(Source source, CharBuffer buffer, AnalysisErrorListener errorListener) : super(source, errorListener) {
1250 this._buffer = buffer;
1251 this._bufferLength = buffer.length();
1252 this._charOffset = -1;
1253 }
1254 int get offset => _charOffset;
1255 int advance() {
1256 if (_charOffset + 1 >= _bufferLength) {
1257 return -1;
1258 }
1259 return _buffer.charAt(++_charOffset);
1260 }
1261 String getString(int start, int endDelta) => _buffer.subSequence(start, _charOffset + 1 + endDelta).toString();
1262 int peek() {
1263 if (_charOffset + 1 >= _buffer.length()) {
1264 return -1;
1265 }
1266 return _buffer.charAt(_charOffset + 1);
1267 }
1268 }
1269 /**
1270 * Instances of the class {@code TokenWithComment} represent a normal token that is preceded by
1271 * comments.
1272 * @coverage dart.engine.parser
1273 */
1274 class TokenWithComment extends Token {
1275 /**
1276 * The first comment in the list of comments that precede this token.
1277 */
1278 Token _precedingComment;
1279 /**
1280 * Initialize a newly created token to have the given type and offset and to be preceded by the
1281 * comments reachable from the given comment.
1282 * @param type the type of the token
1283 * @param offset the offset from the beginning of the file to the first character in the token
1284 * @param precedingComment the first comment in the list of comments that precede this token
1285 */
1286 TokenWithComment(TokenType type, int offset, Token precedingComment) : super(type, offset) {
1287 this._precedingComment = precedingComment;
1288 }
1289 Token get precedingComments => _precedingComment;
1290 }
1291 /**
1292 * Instances of the class {@code Token} represent a token that was scanned from the input. Each
1293 * token knows which token follows it, acting as the head of a linked list of tokens.
1294 * @coverage dart.engine.parser
1295 */
1296 class Token {
1297 /**
1298 * The type of the token.
1299 */
1300 TokenType _type;
1301 /**
1302 * The offset from the beginning of the file to the first character in the token.
1303 */
1304 int _offset = 0;
1305 /**
1306 * The previous token in the token stream.
1307 */
1308 Token _previous;
1309 /**
1310 * The next token in the token stream.
1311 */
1312 Token _next;
1313 /**
1314 * Initialize a newly created token to have the given type and offset.
1315 * @param type the type of the token
1316 * @param offset the offset from the beginning of the file to the first character in the token
1317 */
1318 Token(TokenType type, int offset) {
1319 this._type = type;
1320 this._offset = offset;
1321 }
1322 /**
1323 * Return the offset from the beginning of the file to the character after last character of the
1324 * token.
1325 * @return the offset from the beginning of the file to the first character after last character
1326 * of the token
1327 */
1328 int get end => _offset + length;
1329 /**
1330 * Return the number of characters in the node's source range.
1331 * @return the number of characters in the node's source range
1332 */
1333 int get length => lexeme.length;
1334 /**
1335 * Return the lexeme that represents this token.
1336 * @return the lexeme that represents this token
1337 */
1338 String get lexeme => _type.lexeme;
1339 /**
1340 * Return the next token in the token stream.
1341 * @return the next token in the token stream
1342 */
1343 Token get next => _next;
1344 /**
1345 * Return the offset from the beginning of the file to the first character in the token.
1346 * @return the offset from the beginning of the file to the first character in the token
1347 */
1348 int get offset => _offset;
1349 /**
1350 * Return the first comment in the list of comments that precede this token, or {@code null} if
1351 * there are no comments preceding this token. Additional comments can be reached by following the
1352 * token stream using {@link #getNext()} until {@code null} is returned.
1353 * @return the first comment in the list of comments that precede this token
1354 */
1355 Token get precedingComments => null;
1356 /**
1357 * Return the previous token in the token stream.
1358 * @return the previous token in the token stream
1359 */
1360 Token get previous => _previous;
1361 /**
1362 * Return the type of the token.
1363 * @return the type of the token
1364 */
1365 TokenType get type => _type;
1366 /**
1367 * Return {@code true} if this token represents an operator.
1368 * @return {@code true} if this token represents an operator
1369 */
1370 bool isOperator() => _type.isOperator();
1371 /**
1372 * Return {@code true} if this token is a synthetic token. A synthetic token is a token that was
1373 * introduced by the parser in order to recover from an error in the code. Synthetic tokens always
1374 * have a length of zero ({@code 0}).
1375 * @return {@code true} if this token is a synthetic token
1376 */
1377 bool isSynthetic() => length == 0;
1378 /**
1379 * Return {@code true} if this token represents an operator that can be defined by users.
1380 * @return {@code true} if this token represents an operator that can be defined by users
1381 */
1382 bool isUserDefinableOperator() => _type.isUserDefinableOperator();
1383 /**
1384 * Set the next token in the token stream to the given token. This has the side-effect of setting
1385 * this token to be the previous token for the given token.
1386 * @param token the next token in the token stream
1387 * @return the token that was passed in
1388 */
1389 Token setNext(Token token) {
1390 _next = token;
1391 token.previous = this;
1392 return token;
1393 }
1394 /**
1395 * Set the next token in the token stream to the given token without changing which token is the
1396 * previous token for the given token.
1397 * @param token the next token in the token stream
1398 * @return the token that was passed in
1399 */
1400 Token setNextWithoutSettingPrevious(Token token) {
1401 _next = token;
1402 return token;
1403 }
1404 /**
1405 * Set the offset from the beginning of the file to the first character in the token to the given
1406 * offset.
1407 * @param offset the offset from the beginning of the file to the first character in the token
1408 */
1409 void set offset(int offset4) {
1410 this._offset = offset4;
1411 }
1412 String toString() => lexeme;
1413 /**
1414 * Return the value of this token. For keyword tokens, this is the keyword associated with the
1415 * token, for other tokens it is the lexeme associated with the token.
1416 * @return the value of this token
1417 */
1418 Object value() => _type.lexeme;
1419 /**
1420 * Set the previous token in the token stream to the given token.
1421 * @param previous the previous token in the token stream
1422 */
1423 void set previous(Token previous3) {
1424 this._previous = previous3;
1425 }
1426 }
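A minimal sketch of how the linked token stream is normally traversed; dumpTokens is a hypothetical helper. It relies on appendEofToken() above making the EOF token point to itself, so testing for TokenType.EOF terminates the walk.

void dumpTokens(Token first) {
  for (Token token = first; token.type != TokenType.EOF; token = token.next) {
    print("${token.offset}: ${token.lexeme}");
  }
}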
1427 /**
1428 * Instances of the class {@code StringScanner} implement a scanner that reads from a string. The
1429 * scanning logic is in the superclass.
1430 * @coverage dart.engine.parser
1431 */
1432 class StringScanner extends AbstractScanner {
1433 /**
1434 * The offset from the beginning of the file to the beginning of the source being scanned.
1435 */
1436 int _offsetDelta = 0;
1437 /**
1438 * The string from which characters will be read.
1439 */
1440 String _string;
1441 /**
1442 * The number of characters in the string.
1443 */
1444 int _stringLength = 0;
1445 /**
1446 * The index, relative to the string, of the last character that was read.
1447 */
1448 int _charOffset = 0;
1449 /**
1450 * Initialize a newly created scanner to scan the characters in the given string.
1451 * @param source the source being scanned
1452 * @param string the string from which characters will be read
1453 * @param errorListener the error listener that will be informed of any errors that are found
1454 */
1455 StringScanner(Source source, String string, AnalysisErrorListener errorListener) : super(source, errorListener) {
1456 this._offsetDelta = 0;
1457 this._string = string;
1458 this._stringLength = string.length;
1459 this._charOffset = -1;
1460 }
1461 int get offset => _offsetDelta + _charOffset;
1462 /**
1463 * Record that the source begins on the given line and column at the given offset. The line starts
1464 * for lines before the given line will not be correct.
1465 * <p>
1466 * This method must be invoked at most one time and must be invoked before scanning begins. The
1467 * values provided must be sensible. The results are undefined if these conditions are violated.
1468 * @param line the one-based index of the line containing the first character of the source
1469 * @param column the one-based index of the column in which the first character of the source
1470 * occurs
1471 * @param offset the zero-based offset from the beginning of the larger context to the first
1472 * character of the source
1473 */
1474 void setSourceStart(int line, int column, int offset) {
1475 if (line < 1 || column < 1 || offset < 0 || (line + column - 2) >= offset) {
1476 return;
1477 }
1478 _offsetDelta = 1;
1479 for (int i = 2; i < line; i++) {
1480 recordStartOfLine();
1481 }
1482 _offsetDelta = offset - column + 1;
1483 recordStartOfLine();
1484 _offsetDelta = offset;
1485 }
1486 int advance() {
1487 if (_charOffset + 1 >= _stringLength) {
1488 return -1;
1489 }
1490 return _string.codeUnitAt(++_charOffset);
1491 }
1492 String getString(int start, int endDelta) => _string.substring(start - _offsetDelta, _charOffset + 1 + endDelta);
1493 int peek() {
1494 if (_charOffset + 1 >= _string.length) {
1495 return -1;
1496 }
1497 return _string.codeUnitAt(_charOffset + 1);
1498 }
1499 }
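For reference, an end-to-end use of StringScanner (a sketch only; the Source and AnalysisErrorListener instances are assumed to come from the embedding tool):

Token scanContents(Source source, String contents, AnalysisErrorListener listener) {
  StringScanner scanner = new StringScanner(source, contents, listener);
  Token first = scanner.tokenize();
  // After tokenize(), lineStarts holds the offset of the first character of each line.
  print("lines: ${scanner.lineStarts.length}");
  return first;
}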
1500 /**
1501 * Instances of the class {@code BeginTokenWithComment} represent a begin token that is preceded by
1502 * comments.
1503 * @coverage dart.engine.parser
1504 */
1505 class BeginTokenWithComment extends BeginToken {
1506 /**
1507 * The first comment in the list of comments that precede this token.
1508 */
1509 Token _precedingComment;
1510 /**
1511 * Initialize a newly created token to have the given type and offset and to be preceded by the
1512 * comments reachable from the given comment.
1513 * @param type the type of the token
1514 * @param offset the offset from the beginning of the file to the first character in the token
1515 * @param precedingComment the first comment in the list of comments that precede this token
1516 */
1517 BeginTokenWithComment(TokenType type, int offset, Token precedingComment) : super(type, offset) {
1518 this._precedingComment = precedingComment;
1519 }
1520 Token get precedingComments => _precedingComment;
1521 }
1522 /**
1523 * Instances of the class {@code KeywordToken} represent a keyword in the language.
1524 * @coverage dart.engine.parser
1525 */
1526 class KeywordToken extends Token {
1527 /**
1528 * The keyword being represented by this token.
1529 */
1530 Keyword _keyword;
1531 /**
1532 * Initialize a newly created token to represent the given keyword.
1533 * @param keyword the keyword being represented by this token
1534 * @param offset the offset from the beginning of the file to the first character in the token
1535 */
1536 KeywordToken(Keyword keyword, int offset) : super(TokenType.KEYWORD, offset) {
1537 this._keyword = keyword;
1538 }
1539 /**
1540 * Return the keyword being represented by this token.
1541 * @return the keyword being represented by this token
1542 */
1543 Keyword get keyword => _keyword;
1544 String get lexeme => _keyword.syntax;
1545 Keyword value() => _keyword;
1546 }
1547 /**
1548 * Instances of the class {@code BeginToken} represent the opening half of a grouping pair of
1549 * tokens. This is used for curly brackets ('{'), parentheses ('('), and square brackets ('[').
1550 * @coverage dart.engine.parser
1551 */
1552 class BeginToken extends Token {
1553 /**
1554 * The token that corresponds to this token.
1555 */
1556 Token _endToken;
1557 /**
1558 * Initialize a newly created token representing the opening half of a grouping pair of tokens.
1559 * @param type the type of the token
1560 * @param offset the offset from the beginning of the file to the first character in the token
1561 */
1562 BeginToken(TokenType type, int offset) : super(type, offset) {
1563 assert((identical(type, TokenType.OPEN_CURLY_BRACKET) || identical(type, TokenType.OPEN_PAREN) || identical(type, TokenType.OPEN_SQUARE_BRACKET) || identical(type, TokenType.STRING_INTERPOLATION_EXPRESSION)));
1564 }
1565 /**
1566 * Return the token that corresponds to this token.
1567 * @return the token that corresponds to this token
1568 */
1569 Token get endToken => _endToken;
1570 /**
1571 * Set the token that corresponds to this token to the given token.
1572 * @param token the token that corresponds to this token
1573 */
1574 void set endToken(Token token) {
1575 this._endToken = token;
1576 }
1577 }
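Once the scanner has paired brackets via appendEndToken(), the endToken link lets a consumer skip a balanced group in one step. skipGroup is a hypothetical helper shown only to illustrate the link; for unmatched groups endToken stays null.

Token skipGroup(BeginToken open) {
  Token close = open.endToken;
  // When hasUnmatchedGroups() is true the matching close may be missing.
  return close == null ? open.next : close.next;
}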
1578 /**
1579 * The enumeration {@code TokenClass} represents classes (or groups) of tokens with a similar use.
1580 * @coverage dart.engine.parser
1581 */
1582 class TokenClass {
1583 /**
1584 * A value used to indicate that the token type is not part of any specific class of token.
1585 */
1586 static final TokenClass NO_CLASS = new TokenClass.con1('NO_CLASS', 0);
1587 /**
1588 * A value used to indicate that the token type is an additive operator.
1589 */
1590 static final TokenClass ADDITIVE_OPERATOR = new TokenClass.con2('ADDITIVE_OPERATOR', 1, 12);
1591 /**
1592 * A value used to indicate that the token type is an assignment operator.
1593 */
1594 static final TokenClass ASSIGNMENT_OPERATOR = new TokenClass.con2('ASSIGNMENT_OPERATOR', 2, 1);
1595 /**
1596 * A value used to indicate that the token type is a bitwise-and operator.
1597 */
1598 static final TokenClass BITWISE_AND_OPERATOR = new TokenClass.con2('BITWISE_AND_OPERATOR', 3, 8);
1599 /**
1600 * A value used to indicate that the token type is a bitwise-or operator.
1601 */
1602 static final TokenClass BITWISE_OR_OPERATOR = new TokenClass.con2('BITWISE_OR_OPERATOR', 4, 6);
1603 /**
1604 * A value used to indicate that the token type is a bitwise-xor operator.
1605 */
1606 static final TokenClass BITWISE_XOR_OPERATOR = new TokenClass.con2('BITWISE_XOR_OPERATOR', 5, 7);
1607 /**
1608 * A value used to indicate that the token type is a cascade operator.
1609 */
1610 static final TokenClass CASCADE_OPERATOR = new TokenClass.con2('CASCADE_OPERATOR', 6, 2);
1611 /**
1612 * A value used to indicate that the token type is a conditional operator.
1613 */
1614 static final TokenClass CONDITIONAL_OPERATOR = new TokenClass.con2('CONDITIONAL_OPERATOR', 7, 3);
1615 /**
1616 * A value used to indicate that the token type is an equality operator.
1617 */
1618 static final TokenClass EQUALITY_OPERATOR = new TokenClass.con2('EQUALITY_OPERATOR', 8, 9);
1619 /**
1620 * A value used to indicate that the token type is a logical-and operator.
1621 */
1622 static final TokenClass LOGICAL_AND_OPERATOR = new TokenClass.con2('LOGICAL_AND_OPERATOR', 9, 5);
1623 /**
1624 * A value used to indicate that the token type is a logical-or operator.
1625 */
1626 static final TokenClass LOGICAL_OR_OPERATOR = new TokenClass.con2('LOGICAL_OR_OPERATOR', 10, 4);
1627 /**
1628 * A value used to indicate that the token type is a multiplicative operator.
1629 */
1630 static final TokenClass MULTIPLICATIVE_OPERATOR = new TokenClass.con2('MULTIPLICATIVE_OPERATOR', 11, 13);
1631 /**
1632 * A value used to indicate that the token type is a relational operator.
1633 */
1634 static final TokenClass RELATIONAL_OPERATOR = new TokenClass.con2('RELATIONAL_OPERATOR', 12, 10);
1635 /**
1636 * A value used to indicate that the token type is a shift operator.
1637 */
1638 static final TokenClass SHIFT_OPERATOR = new TokenClass.con2('SHIFT_OPERATOR', 13, 11);
1639 /**
1640 * A value used to indicate that the token type is a unary postfix operator.
1641 */
1642 static final TokenClass UNARY_POSTFIX_OPERATOR = new TokenClass.con2('UNARY_POSTFIX_OPERATOR', 14, 15);
1643 /**
1644 * A value used to indicate that the token type is a unary prefix operator.
1645 */
1646 static final TokenClass UNARY_PREFIX_OPERATOR = new TokenClass.con2('UNARY_PREFIX_OPERATOR', 15, 14);
1647 static final List<TokenClass> values = [NO_CLASS, ADDITIVE_OPERATOR, ASSIGNMENT_OPERATOR, BITWISE_AND_OPERATOR, BITWISE_OR_OPERATOR, BITWISE_XOR_OPERATOR, CASCADE_OPERATOR, CONDITIONAL_OPERATOR, EQUALITY_OPERATOR, LOGICAL_AND_OPERATOR, LOGICAL_OR_OPERATOR, MULTIPLICATIVE_OPERATOR, RELATIONAL_OPERATOR, SHIFT_OPERATOR, UNARY_POSTFIX_OPERATOR, UNARY_PREFIX_OPERATOR];
1648 String __name;
1649 int __ordinal = 0;
1650 int get ordinal => __ordinal;
1651 /**
1652 * The precedence of tokens of this class, or {@code 0} if such tokens do not represent an
1653 * operator.
1654 */
1655 int _precedence = 0;
1656 TokenClass.con1(String ___name, int ___ordinal) {
1657 _jtd_constructor_275_impl(___name, ___ordinal);
1658 }
1659 _jtd_constructor_275_impl(String ___name, int ___ordinal) {
1660 _jtd_constructor_276_impl(___name, ___ordinal, 0);
1661 }
1662 TokenClass.con2(String ___name, int ___ordinal, int precedence2) {
1663 _jtd_constructor_276_impl(___name, ___ordinal, precedence2);
1664 }
1665 _jtd_constructor_276_impl(String ___name, int ___ordinal, int precedence2) {
1666 __name = ___name;
1667 __ordinal = ___ordinal;
1668 this._precedence = precedence2;
1669 }
1670 /**
1671 * Return the precedence of tokens of this class, or {@code 0} if such tokens do not represent
1672 * an operator.
1673 * @return the precedence of tokens of this class
1674 */
1675 int get precedence => _precedence;
1676 String toString() => __name;
1677 }
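// NOTE (reviewer sketch, not part of the generated output): the precedence values above
// encode the usual binding order, e.g. multiplicative (13) binds tighter than additive
// (12), which binds tighter than shift (11). These asserts simply restate the constants
// declared in TokenClass.
void _sketchTokenClassPrecedence() {
  assert(TokenClass.MULTIPLICATIVE_OPERATOR.precedence > TokenClass.ADDITIVE_OPERATOR.precedence);
  assert(TokenClass.ADDITIVE_OPERATOR.precedence > TokenClass.SHIFT_OPERATOR.precedence);
  assert(TokenClass.ASSIGNMENT_OPERATOR.precedence == 1);
  assert(TokenClass.NO_CLASS.precedence == 0);
}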
1678 /**
1679 * Instances of the class {@code KeywordTokenWithComment} implement a keyword token that is preceded
1680 * by comments.
1681 * @coverage dart.engine.parser
1682 */
1683 class KeywordTokenWithComment extends KeywordToken {
1684 /**
1685 * The first comment in the list of comments that precede this token.
1686 */
1687 Token _precedingComment;
1688 /**
1689 * Initialize a newly created token to represent the given keyword and to be preceded by the
1690 * comments reachable from the given comment.
1691 * @param keyword the keyword being represented by this token
1692 * @param offset the offset from the beginning of the file to the first character in the token
1693 * @param precedingComment the first comment in the list of comments that precede this token
1694 */
1695 KeywordTokenWithComment(Keyword keyword, int offset, Token precedingComment) : super(keyword, offset) {
1696 this._precedingComment = precedingComment;
1697 }
1698 Token get precedingComments => _precedingComment;
1699 }
1700 /**
1701 * The enumeration {@code TokenType} defines the types of tokens that can be returned by the
1702 * scanner.
1703 * @coverage dart.engine.parser
1704 */
1705 class TokenType {
1706 /**
1707 * The type of the token that marks the end of the input.
1708 */
1709 static final TokenType EOF = new TokenType_EOF('EOF', 0, null, "");
1710 static final TokenType DOUBLE = new TokenType.con1('DOUBLE', 1);
1711 static final TokenType HEXADECIMAL = new TokenType.con1('HEXADECIMAL', 2);
1712 static final TokenType IDENTIFIER = new TokenType.con1('IDENTIFIER', 3);
1713 static final TokenType INT = new TokenType.con1('INT', 4);
1714 static final TokenType KEYWORD = new TokenType.con1('KEYWORD', 5);
1715 static final TokenType MULTI_LINE_COMMENT = new TokenType.con1('MULTI_LINE_COMMENT', 6);
1716 static final TokenType SCRIPT_TAG = new TokenType.con1('SCRIPT_TAG', 7);
1717 static final TokenType SINGLE_LINE_COMMENT = new TokenType.con1('SINGLE_LINE_COMMENT', 8);
1718 static final TokenType STRING = new TokenType.con1('STRING', 9);
1719 static final TokenType AMPERSAND = new TokenType.con2('AMPERSAND', 10, TokenClass.BITWISE_AND_OPERATOR, "&");
1720 static final TokenType AMPERSAND_AMPERSAND = new TokenType.con2('AMPERSAND_AMPERSAND', 11, TokenClass.LOGICAL_AND_OPERATOR, "&&");
1721 static final TokenType AMPERSAND_EQ = new TokenType.con2('AMPERSAND_EQ', 12, TokenClass.ASSIGNMENT_OPERATOR, "&=");
1722 static final TokenType AT = new TokenType.con2('AT', 13, null, "@");
1723 static final TokenType BANG = new TokenType.con2('BANG', 14, TokenClass.UNARY_PREFIX_OPERATOR, "!");
1724 static final TokenType BANG_EQ = new TokenType.con2('BANG_EQ', 15, TokenClass.EQUALITY_OPERATOR, "!=");
1725 static final TokenType BAR = new TokenType.con2('BAR', 16, TokenClass.BITWISE_OR_OPERATOR, "|");
1726 static final TokenType BAR_BAR = new TokenType.con2('BAR_BAR', 17, TokenClass.LOGICAL_OR_OPERATOR, "||");
1727 static final TokenType BAR_EQ = new TokenType.con2('BAR_EQ', 18, TokenClass.ASSIGNMENT_OPERATOR, "|=");
1728 static final TokenType COLON = new TokenType.con2('COLON', 19, null, ":");
1729 static final TokenType COMMA = new TokenType.con2('COMMA', 20, null, ",");
1730 static final TokenType CARET = new TokenType.con2('CARET', 21, TokenClass.BITWISE_XOR_OPERATOR, "^");
1731 static final TokenType CARET_EQ = new TokenType.con2('CARET_EQ', 22, TokenClass.ASSIGNMENT_OPERATOR, "^=");
1732 static final TokenType CLOSE_CURLY_BRACKET = new TokenType.con2('CLOSE_CURLY_BRACKET', 23, null, "}");
1733 static final TokenType CLOSE_PAREN = new TokenType.con2('CLOSE_PAREN', 24, null, ")");
1734 static final TokenType CLOSE_SQUARE_BRACKET = new TokenType.con2('CLOSE_SQUARE_BRACKET', 25, null, "]");
1735 static final TokenType EQ = new TokenType.con2('EQ', 26, TokenClass.ASSIGNMENT_OPERATOR, "=");
1736 static final TokenType EQ_EQ = new TokenType.con2('EQ_EQ', 27, TokenClass.EQUALITY_OPERATOR, "==");
1737 static final TokenType FUNCTION = new TokenType.con2('FUNCTION', 28, null, "=>");
1738 static final TokenType GT = new TokenType.con2('GT', 29, TokenClass.RELATIONAL_OPERATOR, ">");
1739 static final TokenType GT_EQ = new TokenType.con2('GT_EQ', 30, TokenClass.RELATIONAL_OPERATOR, ">=");
1740 static final TokenType GT_GT = new TokenType.con2('GT_GT', 31, TokenClass.SHIFT_OPERATOR, ">>");
1741 static final TokenType GT_GT_EQ = new TokenType.con2('GT_GT_EQ', 32, TokenClass.ASSIGNMENT_OPERATOR, ">>=");
1742 static final TokenType HASH = new TokenType.con2('HASH', 33, null, "#");
1743 static final TokenType INDEX = new TokenType.con2('INDEX', 34, TokenClass.UNARY_POSTFIX_OPERATOR, "[]");
1744 static final TokenType INDEX_EQ = new TokenType.con2('INDEX_EQ', 35, TokenClass.UNARY_POSTFIX_OPERATOR, "[]=");
1745 static final TokenType IS = new TokenType.con2('IS', 36, TokenClass.RELATIONAL_OPERATOR, "is");
1746 static final TokenType LT = new TokenType.con2('LT', 37, TokenClass.RELATIONAL_OPERATOR, "<");
1747 static final TokenType LT_EQ = new TokenType.con2('LT_EQ', 38, TokenClass.RELATIONAL_OPERATOR, "<=");
1748 static final TokenType LT_LT = new TokenType.con2('LT_LT', 39, TokenClass.SHIFT_OPERATOR, "<<");
1749 static final TokenType LT_LT_EQ = new TokenType.con2('LT_LT_EQ', 40, TokenClass.ASSIGNMENT_OPERATOR, "<<=");
1750 static final TokenType MINUS = new TokenType.con2('MINUS', 41, TokenClass.ADDITIVE_OPERATOR, "-");
1751 static final TokenType MINUS_EQ = new TokenType.con2('MINUS_EQ', 42, TokenClass.ASSIGNMENT_OPERATOR, "-=");
1752 static final TokenType MINUS_MINUS = new TokenType.con2('MINUS_MINUS', 43, TokenClass.UNARY_PREFIX_OPERATOR, "--");
1753 static final TokenType OPEN_CURLY_BRACKET = new TokenType.con2('OPEN_CURLY_BRACKET', 44, null, "{");
1754 static final TokenType OPEN_PAREN = new TokenType.con2('OPEN_PAREN', 45, TokenClass.UNARY_POSTFIX_OPERATOR, "(");
1755 static final TokenType OPEN_SQUARE_BRACKET = new TokenType.con2('OPEN_SQUARE_BRACKET', 46, TokenClass.UNARY_POSTFIX_OPERATOR, "[");
1756 static final TokenType PERCENT = new TokenType.con2('PERCENT', 47, TokenClass.MULTIPLICATIVE_OPERATOR, "%");
1757 static final TokenType PERCENT_EQ = new TokenType.con2('PERCENT_EQ', 48, TokenClass.ASSIGNMENT_OPERATOR, "%=");
1758 static final TokenType PERIOD = new TokenType.con2('PERIOD', 49, TokenClass.UNARY_POSTFIX_OPERATOR, ".");
1759 static final TokenType PERIOD_PERIOD = new TokenType.con2('PERIOD_PERIOD', 50, TokenClass.CASCADE_OPERATOR, "..");
1760 static final TokenType PLUS = new TokenType.con2('PLUS', 51, TokenClass.ADDITIVE_OPERATOR, "+");
1761 static final TokenType PLUS_EQ = new TokenType.con2('PLUS_EQ', 52, TokenClass.ASSIGNMENT_OPERATOR, "+=");
1762 static final TokenType PLUS_PLUS = new TokenType.con2('PLUS_PLUS', 53, TokenClass.UNARY_PREFIX_OPERATOR, "++");
1763 static final TokenType QUESTION = new TokenType.con2('QUESTION', 54, TokenClass.CONDITIONAL_OPERATOR, "?");
1764 static final TokenType SEMICOLON = new TokenType.con2('SEMICOLON', 55, null, ";");
1765 static final TokenType SLASH = new TokenType.con2('SLASH', 56, TokenClass.MULTIPLICATIVE_OPERATOR, "/");
1766 static final TokenType SLASH_EQ = new TokenType.con2('SLASH_EQ', 57, TokenClass.ASSIGNMENT_OPERATOR, "/=");
1767 static final TokenType STAR = new TokenType.con2('STAR', 58, TokenClass.MULTIPLICATIVE_OPERATOR, "*");
1768 static final TokenType STAR_EQ = new TokenType.con2('STAR_EQ', 59, TokenClass.ASSIGNMENT_OPERATOR, "*=");
1769 static final TokenType STRING_INTERPOLATION_EXPRESSION = new TokenType.con2('STRING_INTERPOLATION_EXPRESSION', 60, null, "\${");
1770 static final TokenType STRING_INTERPOLATION_IDENTIFIER = new TokenType.con2('STRING_INTERPOLATION_IDENTIFIER', 61, null, "\$");
1771 static final TokenType TILDE = new TokenType.con2('TILDE', 62, TokenClass.UNARY_PREFIX_OPERATOR, "~");
1772 static final TokenType TILDE_SLASH = new TokenType.con2('TILDE_SLASH', 63, TokenClass.MULTIPLICATIVE_OPERATOR, "~/");
1773 static final TokenType TILDE_SLASH_EQ = new TokenType.con2('TILDE_SLASH_EQ', 64, TokenClass.ASSIGNMENT_OPERATOR, "~/=");
1774 static final TokenType BACKPING = new TokenType.con2('BACKPING', 65, null, "`");
1775 static final TokenType BACKSLASH = new TokenType.con2('BACKSLASH', 66, null, "\\");
1776 static final TokenType PERIOD_PERIOD_PERIOD = new TokenType.con2('PERIOD_PERIOD_PERIOD', 67, null, "...");
1777 static final List<TokenType> values = [EOF, DOUBLE, HEXADECIMAL, IDENTIFIER, INT, KEYWORD, MULTI_LINE_COMMENT, SCRIPT_TAG, SINGLE_LINE_COMMENT, STRING, AMPERSAND, AMPERSAND_AMPERSAND, AMPERSAND_EQ, AT, BANG, BANG_EQ, BAR, BAR_BAR, BAR_EQ, COLON, COMMA, CARET, CARET_EQ, CLOSE_CURLY_BRACKET, CLOSE_PAREN, CLOSE_SQUARE_BRACKET, EQ, EQ_EQ, FUNCTION, GT, GT_EQ, GT_GT, GT_GT_EQ, HASH, INDEX, INDEX_EQ, IS, LT, LT_EQ, LT_LT, LT_LT_EQ, MINUS, MINUS_EQ, MINUS_MINUS, OPEN_CURLY_BRACKET, OPEN_PAREN, OPEN_SQUARE_BRACKET, PERCENT, PERCENT_EQ, PERIOD, PERIOD_PERIOD, PLUS, PLUS_EQ, PLUS_PLUS, QUESTION, SEMICOLON, SLASH, SLASH_EQ, STAR, STAR_EQ, STRING_INTERPOLATION_EXPRESSION, STRING_INTERPOLATION_IDENTIFIER, TILDE, TILDE_SLASH, TILDE_SLASH_EQ, BACKPING, BACKSLASH, PERIOD_PERIOD_PERIOD];
1778 String __name;
1779 int __ordinal = 0;
1780 int get ordinal => __ordinal;
1781 /**
1782 * The class of the token.
1783 */
1784 TokenClass _tokenClass;
1785 /**
1786 * The lexeme that defines this type of token, or {@code null} if there is more than one possible
1787 * lexeme for this type of token.
1788 */
1789 String _lexeme;
1790 TokenType.con1(String ___name, int ___ordinal) {
1791 _jtd_constructor_277_impl(___name, ___ordinal);
1792 }
1793 _jtd_constructor_277_impl(String ___name, int ___ordinal) {
1794 _jtd_constructor_278_impl(___name, ___ordinal, TokenClass.NO_CLASS, null);
1795 }
1796 TokenType.con2(String ___name, int ___ordinal, TokenClass tokenClass2, String lexeme2) {
1797 _jtd_constructor_278_impl(___name, ___ordinal, tokenClass2, lexeme2);
1798 }
1799 _jtd_constructor_278_impl(String ___name, int ___ordinal, TokenClass tokenClass2, String lexeme2) {
1800 __name = ___name;
1801 __ordinal = ___ordinal;
1802 this._tokenClass = tokenClass2 == null ? TokenClass.NO_CLASS : tokenClass2;
1803 this._lexeme = lexeme2;
1804 }
1805 /**
1806 * Return the lexeme that defines this type of token, or {@code null} if there is more than one
1807 * possible lexeme for this type of token.
1808 * @return the lexeme that defines this type of token
1809 */
1810 String get lexeme => _lexeme;
1811 /**
1812 * Return the precedence of the token, or {@code 0} if the token does not represent an operator.
1813 * @return the precedence of the token
1814 */
1815 int get precedence => _tokenClass.precedence;
1816 /**
1817 * Return {@code true} if this type of token represents an additive operator.
1818 * @return {@code true} if this type of token represents an additive operator
1819 */
1820 bool isAdditiveOperator() => identical(_tokenClass, TokenClass.ADDITIVE_OPERATOR);
1821 /**
1822 * Return {@code true} if this type of token represents an assignment operator.
1823 * @return {@code true} if this type of token represents an assignment operator
1824 */
1825 bool isAssignmentOperator() => identical(_tokenClass, TokenClass.ASSIGNMENT_OPERATOR);
1826 /**
1827 * Return {@code true} if this type of token represents an associative operator. An associative
1828 * operator is an operator for which the following equality is true: {@code (a * b) * c == a * (b * c)}. In other words, the result of applying the operator to
1829 * multiple operands does not depend on the order in which those applications occur.
1830 * <p>
1831 * Note: This method considers the logical-and and logical-or operators to be associative, even
1832 * though the order in which those operators are applied can have an effect, because
1833 * evaluation of the right-hand operand is conditional.
1834 * @return {@code true} if this type of token represents an associative operator
1835 */
1836 bool isAssociativeOperator() => identical(this, AMPERSAND) || identical(this, AMPERSAND_AMPERSAND) || identical(this, BAR) || identical(this, BAR_BAR) || identical(this, CARET) || identical(this, PLUS) || identical(this, STAR);
1837 /**
1838 * Return {@code true} if this type of token represents an equality operator.
1839 * @return {@code true} if this type of token represents an equality operator
1840 */
1841 bool isEqualityOperator() => identical(_tokenClass, TokenClass.EQUALITY_OPERATOR);
1842 /**
1843 * Return {@code true} if this type of token represents an increment operator.
1844 * @return {@code true} if this type of token represents an increment operator
1845 */
1846 bool isIncrementOperator() => identical(_lexeme, "++") || identical(_lexeme, "--");
1847 /**
1848 * Return {@code true} if this type of token represents a multiplicative operator.
1849 * @return {@code true} if this type of token represents a multiplicative operator
1850 */
1851 bool isMultiplicativeOperator() => identical(_tokenClass, TokenClass.MULTIPLICATIVE_OPERATOR);
1852 /**
1853 * Return {@code true} if this token type represents an operator.
1854 * @return {@code true} if this token type represents an operator
1855 */
1856 bool isOperator() => _tokenClass != TokenClass.NO_CLASS && this != OPEN_PAREN && this != OPEN_SQUARE_BRACKET && this != PERIOD;
1857 /**
1858 * Return {@code true} if this type of token represents a relational operator.
1859 * @return {@code true} if this type of token represents a relational operator
1860 */
1861 bool isRelationalOperator() => identical(_tokenClass, TokenClass.RELATIONAL_OPERATOR);
1862 /**
1863 * Return {@code true} if this type of token represents a shift operator.
1864 * @return {@code true} if this type of token represents a shift operator
1865 */
1866 bool isShiftOperator() => identical(_tokenClass, TokenClass.SHIFT_OPERATOR);
1867 /**
1868 * Return {@code true} if this type of token represents a unary postfix operator.
1869 * @return {@code true} if this type of token represents a unary postfix operator
1870 */
1871 bool isUnaryPostfixOperator() => identical(_tokenClass, TokenClass.UNARY_POSTFIX_OPERATOR);
1872 /**
1873 * Return {@code true} if this type of token represents a unary prefix operator.
1874 * @return {@code true} if this type of token represents a unary prefix operator
1875 */
1876 bool isUnaryPrefixOperator() => identical(_tokenClass, TokenClass.UNARY_PREFIX_OPERATOR);
1877 /**
1878 * Return {@code true} if this token type represents an operator that can be defined by users.
1879 * @return {@code true} if this token type represents an operator that can be defined by users
1880 */
1881 bool isUserDefinableOperator() => identical(_lexeme, "==") || identical(_lexeme, "~") || identical(_lexeme, "[]") || identical(_lexeme, "[]=") || identical(_lexeme, "*") || identical(_lexeme, "/") || identical(_lexeme, "%") || identical(_lexeme, "~/") || identical(_lexeme, "+") || identical(_lexeme, "-") || identical(_lexeme, "<<") || identical(_lexeme, ">>") || identical(_lexeme, ">=") || identical(_lexeme, ">") || identical(_lexeme, "<=") || identical(_lexeme, "<") || identical(_lexeme, "&") || identical(_lexeme, "^") || identical(_lexeme, "|");
1882 String toString() => __name;
1883 }
1884 class TokenType_EOF extends TokenType {
1885 TokenType_EOF(String ___name, int ___ordinal, TokenClass arg0, String arg1) : super.con2(___name, ___ordinal, arg0, arg1);
1886 String toString() => "-eof-";
1887 }
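// NOTE (reviewer sketch, not part of the generated output): how a parser is expected to
// query TokenType. Every value asserted here comes straight from the constants declared
// above.
void _sketchTokenTypeQueries() {
  // PLUS belongs to the additive class, so it reports that class's precedence (12).
  assert(TokenType.PLUS.isAdditiveOperator());
  assert(TokenType.PLUS.precedence == 12);
  // OPEN_PAREN carries a token class (for postfix precedence) but is explicitly
  // excluded by isOperator().
  assert(!TokenType.OPEN_PAREN.isOperator());
  // The EOF subclass overrides toString().
  assert(TokenType.EOF.toString() == "-eof-");
}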