| OLD | NEW |
| (Empty) |
| 1 // This code was auto-generated, is not intended to be edited, and is subject to | |
| 2 // significant change. Please see the README file for more information. | |
| 3 library engine.scanner; | |
| 4 import 'dart:collection'; | |
| 5 import 'java_core.dart'; | |
| 6 import 'java_engine.dart'; | |
| 7 import 'source.dart'; | |
| 8 import 'error.dart'; | |
| 9 import 'instrumentation.dart'; | |
| 10 import 'utilities_collection.dart' show TokenMap; | |
/**
 * Instances of the abstract class `KeywordState` represent a state in a state machine used to
 * scan keywords.
 *
 * @coverage dart.engine.parser
 */
class KeywordState {

  /**
   * An empty transition table used by leaf states.
   */
  static List<KeywordState> _EMPTY_TABLE = new List<KeywordState>(26);

  /**
   * The initial state in the state machine.
   */
  static KeywordState KEYWORD_STATE = createKeywordStateTable();

  /**
   * Create the next state in the state machine where we have already recognized the subset of
   * strings in the given array of strings starting at the given offset and having the given length.
   * All of these strings have a common prefix and the next character is at the given start index.
   *
   * @param start the index of the character in the strings used to transition to a new state
   * @param strings an array containing all of the strings that will be recognized by the state
   *          machine
   * @param offset the offset of the first string in the array that has the prefix that is assumed
   *          to have been recognized by the time we reach the state being built
   * @param length the number of strings in the array that pass through the state being built
   * @return the state that was created
   */
  static KeywordState computeKeywordStateTable(int start, List<String> strings, int offset, int length) {
    // One slot per lower-case letter 'a'..'z' (0x61..0x7A); a null entry means
    // no keyword continues with that letter from this state.
    List<KeywordState> result = new List<KeywordState>(26);
    assert(length != 0);
    int chunk = 0x0;
    int chunkStart = -1;
    bool isLeaf = false;
    // The strings are sorted, so strings sharing the same character at index
    // 'start' form a contiguous "chunk" [chunkStart, i).
    for (int i = offset; i < offset + length; i++) {
      if (strings[i].length == start) {
        // A keyword ends exactly at this state, so the state is terminal.
        isLeaf = true;
      }
      if (strings[i].length > start) {
        int c = strings[i].codeUnitAt(start);
        if (chunk != c) {
          if (chunkStart != -1) {
            // Close out the previous chunk: recurse to build its sub-state.
            result[chunk - 0x61] = computeKeywordStateTable(start + 1, strings, chunkStart, i - chunkStart);
          }
          chunkStart = i;
          chunk = c;
        }
      }
    }
    if (chunkStart != -1) {
      // Build the sub-state for the final chunk.
      assert(result[chunk - 0x61] == null);
      result[chunk - 0x61] = computeKeywordStateTable(start + 1, strings, chunkStart, offset + length - chunkStart);
    } else {
      // No string extends past 'start': this is a leaf recognizing one keyword.
      assert(length == 1);
      return new KeywordState(_EMPTY_TABLE, strings[offset]);
    }
    if (isLeaf) {
      // Terminal state that also has outgoing transitions (e.g. "in" vs "int").
      return new KeywordState(result, strings[offset]);
    } else {
      return new KeywordState(result, null);
    }
  }

  /**
   * Create the initial state in the state machine.
   *
   * @return the state that was created
   */
  static KeywordState createKeywordStateTable() {
    List<Keyword> values = Keyword.values;
    List<String> strings = new List<String>(values.length);
    for (int i = 0; i < values.length; i++) {
      strings[i] = values[i].syntax;
    }
    // computeKeywordStateTable requires the lexemes to be sorted so that
    // strings with a common prefix are contiguous.
    strings.sort();
    return computeKeywordStateTable(0, strings, 0, strings.length);
  }

  /**
   * A table mapping characters to the states to which those characters will transition. (The index
   * into the array is the offset from the character `'a'` to the transitioning character.)
   */
  List<KeywordState> _table;

  /**
   * The keyword that is recognized by this state, or `null` if this state is not a terminal
   * state.
   */
  Keyword _keyword2;

  /**
   * Initialize a newly created state to have the given transitions and to recognize the keyword
   * with the given syntax.
   *
   * @param table a table mapping characters to the states to which those characters will transition
   * @param syntax the syntax of the keyword that is recognized by the state
   */
  KeywordState(List<KeywordState> table, String syntax) {
    this._table = table;
    this._keyword2 = (syntax == null) ? null : Keyword.keywords[syntax];
  }

  /**
   * Return the keyword that was recognized by this state, or `null` if this state does not
   * recognize a keyword.
   *
   * @return the keyword that was matched by reaching this state
   */
  Keyword keyword() => _keyword2;

  /**
   * Return the state that follows this state on a transition of the given character, or
   * `null` if there is no valid state reachable from this state with such a transition.
   *
   * @param c the character used to transition from this state to another state
   * @return the state that follows this state on a transition of the given character
   */
  KeywordState next(int c) => _table[c - 0x61];
}
/**
 * The enumeration `ScannerErrorCode` defines the error codes used for errors detected by the
 * scanner.
 *
 * @coverage dart.engine.parser
 */
class ScannerErrorCode extends Enum<ScannerErrorCode> implements ErrorCode {
  static final ScannerErrorCode CHARACTER_EXPECTED_AFTER_SLASH = new ScannerErrorCode.con1('CHARACTER_EXPECTED_AFTER_SLASH', 0, "Character expected after slash");
  static final ScannerErrorCode ILLEGAL_CHARACTER = new ScannerErrorCode.con1('ILLEGAL_CHARACTER', 1, "Illegal character %x");
  static final ScannerErrorCode MISSING_DIGIT = new ScannerErrorCode.con1('MISSING_DIGIT', 2, "Decimal digit expected");
  // Fixed typo in the user-facing message: "Hexidecimal" -> "Hexadecimal".
  static final ScannerErrorCode MISSING_HEX_DIGIT = new ScannerErrorCode.con1('MISSING_HEX_DIGIT', 3, "Hexadecimal digit expected");
  static final ScannerErrorCode MISSING_QUOTE = new ScannerErrorCode.con1('MISSING_QUOTE', 4, "Expected quote (' or \")");
  static final ScannerErrorCode UNTERMINATED_MULTI_LINE_COMMENT = new ScannerErrorCode.con1('UNTERMINATED_MULTI_LINE_COMMENT', 5, "Unterminated multi-line comment");
  static final ScannerErrorCode UNTERMINATED_STRING_LITERAL = new ScannerErrorCode.con1('UNTERMINATED_STRING_LITERAL', 6, "Unterminated string literal");

  /**
   * All of the error codes, in ordinal order.
   */
  static final List<ScannerErrorCode> values = [
      CHARACTER_EXPECTED_AFTER_SLASH,
      ILLEGAL_CHARACTER,
      MISSING_DIGIT,
      MISSING_HEX_DIGIT,
      MISSING_QUOTE,
      UNTERMINATED_MULTI_LINE_COMMENT,
      UNTERMINATED_STRING_LITERAL];

  /**
   * The template used to create the message to be displayed for this error.
   */
  String _message;

  /**
   * The template used to create the correction to be displayed for this error, or `null` if
   * there is no correction information for this error.
   */
  String correction10;

  /**
   * Initialize a newly created error code to have the given message.
   *
   * @param message the message template used to create the message to be displayed for this error
   */
  ScannerErrorCode.con1(String name, int ordinal, String message) : super(name, ordinal) {
    this._message = message;
  }

  /**
   * Initialize a newly created error code to have the given message and correction.
   *
   * @param message the template used to create the message to be displayed for the error
   * @param correction the template used to create the correction to be displayed for the error
   */
  ScannerErrorCode.con2(String name, int ordinal, String message, String correction) : super(name, ordinal) {
    this._message = message;
    this.correction10 = correction;
  }

  String get correction => correction10;

  // All scanner errors are syntactic errors of ERROR severity.
  ErrorSeverity get errorSeverity => ErrorSeverity.ERROR;

  String get message => _message;

  ErrorType get type => ErrorType.SYNTACTIC_ERROR;
}
/**
 * Instances of the class `SubSequenceReader` implement a [CharacterReader] that reads
 * characters from a character sequence, but adds a delta when reporting the current character
 * offset so that the character sequence can be a subsequence from a larger sequence.
 */
class SubSequenceReader extends CharSequenceReader {

  /**
   * The number of characters between the beginning of the enclosing file and the
   * beginning of the source being scanned.
   */
  int _offsetDelta = 0;

  /**
   * Initialize a newly created reader to read the characters in the given sequence.
   *
   * @param sequence the sequence from which characters will be read
   * @param offsetDelta the offset from the beginning of the file to the beginning of the source
   *          being scanned
   */
  SubSequenceReader(CharSequence sequence, int offsetDelta) : super(sequence) {
    this._offsetDelta = offsetDelta;
  }

  // Report file-relative offsets to clients...
  int get offset => _offsetDelta + super.offset;

  // ...and translate file-relative offsets back to sequence-relative ones.
  void set offset(int offset) {
    super.offset = offset - _offsetDelta;
  }

  String getString(int start, int endDelta) {
    return super.getString(start - _offsetDelta, endDelta);
  }
}
/**
 * Instances of the class `StringTokenWithComment` represent a string token that is preceded by
 * comments.
 *
 * @coverage dart.engine.parser
 */
class StringTokenWithComment extends StringToken {

  /**
   * The first comment in the list of comments that precede this token.
   */
  Token _precedingComment;

  /**
   * Initialize a newly created token to have the given type and offset and to be preceded by the
   * comments reachable from the given comment.
   *
   * @param type the type of the token
   * @param offset the offset from the beginning of the file to the first character in the token
   * @param precedingComment the first comment in the list of comments that precede this token
   */
  StringTokenWithComment(TokenType type, String value, int offset, Token precedingComment) : super(type, value, offset) {
    this._precedingComment = precedingComment;
  }

  Token get precedingComments => _precedingComment;

  Token copy() => new StringTokenWithComment(type, lexeme, offset, copyComments(_precedingComment));

  void applyDelta(int delta) {
    super.applyDelta(delta);
    // Shift the offsets of the preceding comment tokens as well.
    for (Token comment = _precedingComment; comment != null; comment = comment.next) {
      comment.applyDelta(delta);
    }
  }
}
/**
 * The enumeration `Keyword` defines the keywords in the Dart programming language.
 *
 * @coverage dart.engine.parser
 */
class Keyword extends Enum<Keyword> {
  // Keywords created with con1 are not pseudo-keywords (isPseudoKeyword is false).
  static final Keyword ASSERT = new Keyword.con1('ASSERT', 0, "assert");
  static final Keyword BREAK = new Keyword.con1('BREAK', 1, "break");
  static final Keyword CASE = new Keyword.con1('CASE', 2, "case");
  static final Keyword CATCH = new Keyword.con1('CATCH', 3, "catch");
  static final Keyword CLASS = new Keyword.con1('CLASS', 4, "class");
  static final Keyword CONST = new Keyword.con1('CONST', 5, "const");
  static final Keyword CONTINUE = new Keyword.con1('CONTINUE', 6, "continue");
  static final Keyword DEFAULT = new Keyword.con1('DEFAULT', 7, "default");
  static final Keyword DO = new Keyword.con1('DO', 8, "do");
  static final Keyword ELSE = new Keyword.con1('ELSE', 9, "else");
  static final Keyword ENUM = new Keyword.con1('ENUM', 10, "enum");
  static final Keyword EXTENDS = new Keyword.con1('EXTENDS', 11, "extends");
  static final Keyword FALSE = new Keyword.con1('FALSE', 12, "false");
  static final Keyword FINAL = new Keyword.con1('FINAL', 13, "final");
  static final Keyword FINALLY = new Keyword.con1('FINALLY', 14, "finally");
  static final Keyword FOR = new Keyword.con1('FOR', 15, "for");
  static final Keyword IF = new Keyword.con1('IF', 16, "if");
  static final Keyword IN = new Keyword.con1('IN', 17, "in");
  static final Keyword IS = new Keyword.con1('IS', 18, "is");
  static final Keyword NEW = new Keyword.con1('NEW', 19, "new");
  static final Keyword NULL = new Keyword.con1('NULL', 20, "null");
  static final Keyword RETHROW = new Keyword.con1('RETHROW', 21, "rethrow");
  static final Keyword RETURN = new Keyword.con1('RETURN', 22, "return");
  static final Keyword SUPER = new Keyword.con1('SUPER', 23, "super");
  static final Keyword SWITCH = new Keyword.con1('SWITCH', 24, "switch");
  static final Keyword THIS = new Keyword.con1('THIS', 25, "this");
  static final Keyword THROW = new Keyword.con1('THROW', 26, "throw");
  static final Keyword TRUE = new Keyword.con1('TRUE', 27, "true");
  static final Keyword TRY = new Keyword.con1('TRY', 28, "try");
  static final Keyword VAR = new Keyword.con1('VAR', 29, "var");
  static final Keyword VOID = new Keyword.con1('VOID', 30, "void");
  static final Keyword WHILE = new Keyword.con1('WHILE', 31, "while");
  static final Keyword WITH = new Keyword.con1('WITH', 32, "with");
  // Keywords created with con2(..., true) are pseudo-keywords and may be used
  // as identifiers (see isPseudoKeyword below).
  static final Keyword ABSTRACT = new Keyword.con2('ABSTRACT', 33, "abstract", true);
  static final Keyword AS = new Keyword.con2('AS', 34, "as", true);
  static final Keyword DYNAMIC = new Keyword.con2('DYNAMIC', 35, "dynamic", true);
  static final Keyword EXPORT = new Keyword.con2('EXPORT', 36, "export", true);
  static final Keyword EXTERNAL = new Keyword.con2('EXTERNAL', 37, "external", true);
  static final Keyword FACTORY = new Keyword.con2('FACTORY', 38, "factory", true);
  static final Keyword GET = new Keyword.con2('GET', 39, "get", true);
  static final Keyword IMPLEMENTS = new Keyword.con2('IMPLEMENTS', 40, "implements", true);
  static final Keyword IMPORT = new Keyword.con2('IMPORT', 41, "import", true);
  static final Keyword LIBRARY = new Keyword.con2('LIBRARY', 42, "library", true);
  static final Keyword OPERATOR = new Keyword.con2('OPERATOR', 43, "operator", true);
  static final Keyword PART = new Keyword.con2('PART', 44, "part", true);
  static final Keyword SET = new Keyword.con2('SET', 45, "set", true);
  static final Keyword STATIC = new Keyword.con2('STATIC', 46, "static", true);
  static final Keyword TYPEDEF = new Keyword.con2('TYPEDEF', 47, "typedef", true);
  // All keywords, in ordinal order.
  static final List<Keyword> values = [
      ASSERT,
      BREAK,
      CASE,
      CATCH,
      CLASS,
      CONST,
      CONTINUE,
      DEFAULT,
      DO,
      ELSE,
      ENUM,
      EXTENDS,
      FALSE,
      FINAL,
      FINALLY,
      FOR,
      IF,
      IN,
      IS,
      NEW,
      NULL,
      RETHROW,
      RETURN,
      SUPER,
      SWITCH,
      THIS,
      THROW,
      TRUE,
      TRY,
      VAR,
      VOID,
      WHILE,
      WITH,
      ABSTRACT,
      AS,
      DYNAMIC,
      EXPORT,
      EXTERNAL,
      FACTORY,
      GET,
      IMPLEMENTS,
      IMPORT,
      LIBRARY,
      OPERATOR,
      PART,
      SET,
      STATIC,
      TYPEDEF];

  /**
   * The lexeme for the keyword.
   */
  String syntax;

  /**
   * A flag indicating whether the keyword is a pseudo-keyword. Pseudo keywords can be used as
   * identifiers.
   */
  bool isPseudoKeyword = false;

  /**
   * A table mapping the lexemes of keywords to the corresponding keyword.
   */
  static Map<String, Keyword> keywords = createKeywordMap();

  /**
   * Create a table mapping the lexemes of keywords to the corresponding keyword.
   *
   * @return the table that was created
   */
  static Map<String, Keyword> createKeywordMap() {
    LinkedHashMap<String, Keyword> result = new LinkedHashMap<String, Keyword>();
    for (Keyword keyword in values) {
      result[keyword.syntax] = keyword;
    }
    return result;
  }

  /**
   * Initialize a newly created keyword to have the given syntax. The keyword is not a
   * pseudo-keyword.
   *
   * @param syntax the lexeme for the keyword
   */
  Keyword.con1(String name, int ordinal, String syntax) : this.con2(name, ordinal, syntax, false);

  /**
   * Initialize a newly created keyword to have the given syntax. The keyword is a pseudo-keyword if
   * the given flag is `true`.
   *
   * @param syntax the lexeme for the keyword
   * @param isPseudoKeyword `true` if this keyword is a pseudo-keyword
   */
  Keyword.con2(String name, int ordinal, String syntax, bool isPseudoKeyword) : super(name, ordinal) {
    this.syntax = syntax;
    this.isPseudoKeyword = isPseudoKeyword;
  }
}
/**
 * Instances of the class `CharSequenceReader` implement a [CharacterReader] that reads
 * characters from a character sequence.
 */
class CharSequenceReader implements CharacterReader {

  /**
   * The sequence from which characters will be read.
   */
  CharSequence _sequence;

  /**
   * The number of characters in the string.
   */
  int _stringLength = 0;

  /**
   * The index, relative to the string, of the last character that was read.
   */
  int _charOffset = 0;

  /**
   * Initialize a newly created reader to read the characters in the given sequence.
   *
   * @param sequence the sequence from which characters will be read
   */
  CharSequenceReader(CharSequence sequence) {
    this._sequence = sequence;
    // Cache the length so that advance() and peek() do not re-query it.
    this._stringLength = sequence.length();
    // Start before the first character; the first advance() reads index 0.
    this._charOffset = -1;
  }

  /**
   * Advance past the next character and return it, or `-1` if the end of the
   * sequence has been reached.
   */
  int advance() {
    if (_charOffset + 1 >= _stringLength) {
      return -1;
    }
    return _sequence.charAt(++_charOffset);
  }

  int get offset => _charOffset;

  String getString(int start, int endDelta) => _sequence.subSequence(start, _charOffset + 1 + endDelta).toString();

  /**
   * Return the next character without advancing, or `-1` at end of input.
   */
  int peek() {
    // Use the cached length, consistent with advance(), instead of re-querying
    // the sequence on every call.
    if (_charOffset + 1 >= _stringLength) {
      return -1;
    }
    return _sequence.charAt(_charOffset + 1);
  }

  void set offset(int offset) {
    _charOffset = offset;
  }
}
/**
 * Instances of the class `IncrementalScanner` implement a scanner that scans a subset of a
 * string and inserts the resulting tokens into the middle of an existing token stream.
 *
 * @coverage dart.engine.parser
 */
class IncrementalScanner extends Scanner {

  /**
   * The reader used to access the characters in the source.
   */
  CharacterReader _reader;

  /**
   * A map from tokens that were copied to the copies of the tokens.
   */
  TokenMap _tokenMap = new TokenMap();

  /**
   * Initialize a newly created scanner.
   *
   * @param source the source being scanned
   * @param reader the character reader used to read the characters in the source
   * @param errorListener the error listener that will be informed of any errors that are found
   */
  IncrementalScanner(Source source, CharacterReader reader, AnalysisErrorListener errorListener) : super(source, reader, errorListener) {
    this._reader = reader;
  }

  /**
   * Given the stream of tokens scanned from the original source, the modified source (the result of
   * replacing one contiguous range of characters with another string of characters), and a
   * specification of the modification that was made, return a stream of tokens scanned from the
   * modified source. The original stream of tokens will not be modified.
   *
   * @param originalStream the stream of tokens scanned from the original source
   * @param index the index of the first character in both the original and modified source that was
   *          affected by the modification
   * @param removedLength the number of characters removed from the original source
   * @param insertedLength the number of characters added to the modified source
   */
  Token rescan(Token originalStream, int index, int removedLength, int insertedLength) {
    // Copy, unshifted, the tokens that end before the modified region.
    while (originalStream.end < index) {
      originalStream = copyAndAdvance(originalStream, 0);
    }
    // Rescan the modified region itself; the reader is positioned so that the
    // next advance() reads the first affected character (or the start of the
    // token that straddles the modification point).
    int modifiedEnd = index + insertedLength - 1;
    _reader.offset = Math.min(originalStream.offset, index) - 1;
    int next = _reader.advance();
    while (next != -1 && _reader.offset <= modifiedEnd) {
      next = bigSwitch(next);
    }
    // Skip the original tokens that were consumed by the modification.
    int removedEnd = index + removedLength - 1;
    while (originalStream.offset <= removedEnd) {
      originalStream = originalStream.next;
    }
    // Copy the remaining tokens, shifting offsets by the net change in length,
    // then copy the trailing EOF token as well.
    int delta = insertedLength - removedLength;
    while (originalStream.type != TokenType.EOF) {
      originalStream = copyAndAdvance(originalStream, delta);
    }
    copyAndAdvance(originalStream, delta);
    return firstToken();
  }

  /**
   * Copy the given token (and its preceding comments), record the mapping from the original
   * token to the copy, shift the copy's offset by the given delta, and append the copy to the
   * token stream being built.
   *
   * @param originalToken the token to be copied
   * @param delta the amount by which the copy's offset differs from the original's
   * @return the token following the original token
   */
  Token copyAndAdvance(Token originalToken, int delta) {
    Token copiedToken = originalToken.copy();
    _tokenMap.put(originalToken, copiedToken);
    copiedToken.applyDelta(delta);
    appendToken(copiedToken);
    Token originalComment = originalToken.precedingComments;
    // Fixed: walk the COPIED token's comment list in parallel with the
    // original's. Previously both variables were initialized from
    // originalToken, so each original comment was mapped to itself rather
    // than to its copy.
    Token copiedComment = copiedToken.precedingComments;
    while (originalComment != null) {
      _tokenMap.put(originalComment, copiedComment);
      originalComment = originalComment.next;
      copiedComment = copiedComment.next;
    }
    return originalToken.next;
  }
}
| 533 /** | |
| 534 * The class `Scanner` implements a scanner for Dart code. | |
| 535 * | |
| 536 * The lexical structure of Dart is ambiguous without knowledge of the context i
n which a token is | |
| 537 * being scanned. For example, without context we cannot determine whether sourc
e of the form "<<" | |
| 538 * should be scanned as a single left-shift operator or as two left angle bracke
ts. This scanner | |
| 539 * does not have any context, so it always resolves such conflicts by scanning t
he longest possible | |
| 540 * token. | |
| 541 * | |
| 542 * @coverage dart.engine.parser | |
| 543 */ | |
| 544 class Scanner { | |
| 545 | |
| 546 /** | |
| 547 * The source being scanned. | |
| 548 */ | |
| 549 Source source; | |
| 550 | |
| 551 /** | |
| 552 * The reader used to access the characters in the source. | |
| 553 */ | |
| 554 CharacterReader _reader; | |
| 555 | |
| 556 /** | |
| 557 * The error listener that will be informed of any errors that are found durin
g the scan. | |
| 558 */ | |
| 559 AnalysisErrorListener _errorListener; | |
| 560 | |
| 561 /** | |
| 562 * The token pointing to the head of the linked list of tokens. | |
| 563 */ | |
| 564 Token _tokens; | |
| 565 | |
| 566 /** | |
| 567 * The last token that was scanned. | |
| 568 */ | |
| 569 Token _tail; | |
| 570 | |
| 571 /** | |
| 572 * The first token in the list of comment tokens found since the last non-comm
ent token. | |
| 573 */ | |
| 574 Token _firstComment; | |
| 575 | |
| 576 /** | |
| 577 * The last token in the list of comment tokens found since the last non-comme
nt token. | |
| 578 */ | |
| 579 Token _lastComment; | |
| 580 | |
| 581 /** | |
| 582 * The index of the first character of the current token. | |
| 583 */ | |
| 584 int _tokenStart = 0; | |
| 585 | |
| 586 /** | |
| 587 * A list containing the offsets of the first character of each line in the so
urce code. | |
| 588 */ | |
| 589 List<int> _lineStarts = new List<int>(); | |
| 590 | |
| 591 /** | |
| 592 * A list, treated something like a stack, of tokens representing the beginnin
g of a matched pair. | |
| 593 * It is used to pair the end tokens with the begin tokens. | |
| 594 */ | |
| 595 List<BeginToken> _groupingStack = new List<BeginToken>(); | |
| 596 | |
| 597 /** | |
| 598 * The index of the last item in the [groupingStack], or `-1` if the stack is
empty. | |
| 599 */ | |
| 600 int _stackEnd = -1; | |
| 601 | |
| 602 /** | |
| 603 * A flag indicating whether any unmatched groups were found during the parse. | |
| 604 */ | |
| 605 bool _hasUnmatchedGroups2 = false; | |
| 606 | |
| 607 /** | |
| 608 * Initialize a newly created scanner. | |
| 609 * | |
| 610 * @param source the source being scanned | |
| 611 * @param reader the character reader used to read the characters in the sourc
e | |
| 612 * @param errorListener the error listener that will be informed of any errors
that are found | |
| 613 */ | |
| 614 Scanner(Source source, CharacterReader reader, AnalysisErrorListener errorList
ener) { | |
| 615 this.source = source; | |
| 616 this._reader = reader; | |
| 617 this._errorListener = errorListener; | |
| 618 _tokens = new Token(TokenType.EOF, -1); | |
| 619 _tokens.setNext(_tokens); | |
| 620 _tail = _tokens; | |
| 621 _tokenStart = -1; | |
| 622 _lineStarts.add(0); | |
| 623 } | |
| 624 | |
| 625 /** | |
| 626 * Return an array containing the offsets of the first character of each line
in the source code. | |
| 627 * | |
| 628 * @return an array containing the offsets of the first character of each line
in the source code | |
| 629 */ | |
| 630 List<int> get lineStarts => _lineStarts; | |
| 631 | |
| 632 /** | |
| 633 * Return `true` if any unmatched groups were found during the parse. | |
| 634 * | |
| 635 * @return `true` if any unmatched groups were found during the parse | |
| 636 */ | |
| 637 bool hasUnmatchedGroups() => _hasUnmatchedGroups2; | |
| 638 | |
| 639 /** | |
| 640 * Record that the source begins on the given line and column at the current o
ffset as given by | |
| 641 * the reader. The line starts for lines before the given line will not be cor
rect. | |
| 642 * | |
| 643 * This method must be invoked at most one time and must be invoked before sca
nning begins. The | |
| 644 * values provided must be sensible. The results are undefined if these condit
ions are violated. | |
| 645 * | |
| 646 * @param line the one-based index of the line containing the first character
of the source | |
| 647 * @param column the one-based index of the column in which the first characte
r of the source | |
| 648 * occurs | |
| 649 */ | |
| 650 void setSourceStart(int line, int column) { | |
| 651 int offset = _reader.offset; | |
| 652 if (line < 1 || column < 1 || offset < 0 || (line + column - 2) >= offset) { | |
| 653 return; | |
| 654 } | |
| 655 for (int i = 2; i < line; i++) { | |
| 656 _lineStarts.add(1); | |
| 657 } | |
| 658 _lineStarts.add(offset - column + 1); | |
| 659 } | |
| 660 | |
| 661 /** | |
| 662 * Scan the source code to produce a list of tokens representing the source. | |
| 663 * | |
| 664 * @return the first token in the list of tokens that were produced | |
| 665 */ | |
| 666 Token tokenize() { | |
| 667 InstrumentationBuilder instrumentation = Instrumentation.builder2("dart.engi
ne.AbstractScanner.tokenize"); | |
| 668 int tokenCounter = 0; | |
| 669 try { | |
| 670 int next = _reader.advance(); | |
| 671 while (next != -1) { | |
| 672 tokenCounter++; | |
| 673 next = bigSwitch(next); | |
| 674 } | |
| 675 appendEofToken(); | |
| 676 instrumentation.metric2("tokensCount", tokenCounter); | |
| 677 return firstToken(); | |
| 678 } finally { | |
| 679 instrumentation.log2(2); | |
| 680 } | |
| 681 } | |
| 682 | |
| 683 /** | |
| 684 * Append the given token to the end of the token stream being scanned. This m
ethod is intended to | |
| 685 * be used by subclasses that copy existing tokens and should not normally be
used because it will | |
| 686 * fail to correctly associate any comments with the token being passed in. | |
| 687 * | |
| 688 * @param token the token to be appended | |
| 689 */ | |
| 690 void appendToken(Token token) { | |
| 691 _tail = _tail.setNext(token); | |
| 692 } | |
| 693 int bigSwitch(int next) { | |
| 694 beginToken(); | |
| 695 if (next == 0xD) { | |
| 696 next = _reader.advance(); | |
| 697 if (next == 0xA) { | |
| 698 next = _reader.advance(); | |
| 699 } | |
| 700 recordStartOfLine(); | |
| 701 return next; | |
| 702 } else if (next == 0xA) { | |
| 703 next = _reader.advance(); | |
| 704 recordStartOfLine(); | |
| 705 return next; | |
| 706 } else if (next == 0x9 || next == 0x20) { | |
| 707 return _reader.advance(); | |
| 708 } | |
| 709 if (next == 0x72) { | |
| 710 int peek = _reader.peek(); | |
| 711 if (peek == 0x22 || peek == 0x27) { | |
| 712 int start = _reader.offset; | |
| 713 return tokenizeString(_reader.advance(), start, true); | |
| 714 } | |
| 715 } | |
| 716 if (0x61 <= next && next <= 0x7A) { | |
| 717 return tokenizeKeywordOrIdentifier(next, true); | |
| 718 } | |
| 719 if ((0x41 <= next && next <= 0x5A) || next == 0x5F || next == 0x24) { | |
| 720 return tokenizeIdentifier(next, _reader.offset, true); | |
| 721 } | |
| 722 if (next == 0x3C) { | |
| 723 return tokenizeLessThan(next); | |
| 724 } | |
| 725 if (next == 0x3E) { | |
| 726 return tokenizeGreaterThan(next); | |
| 727 } | |
| 728 if (next == 0x3D) { | |
| 729 return tokenizeEquals(next); | |
| 730 } | |
| 731 if (next == 0x21) { | |
| 732 return tokenizeExclamation(next); | |
| 733 } | |
| 734 if (next == 0x2B) { | |
| 735 return tokenizePlus(next); | |
| 736 } | |
| 737 if (next == 0x2D) { | |
| 738 return tokenizeMinus(next); | |
| 739 } | |
| 740 if (next == 0x2A) { | |
| 741 return tokenizeMultiply(next); | |
| 742 } | |
| 743 if (next == 0x25) { | |
| 744 return tokenizePercent(next); | |
| 745 } | |
| 746 if (next == 0x26) { | |
| 747 return tokenizeAmpersand(next); | |
| 748 } | |
| 749 if (next == 0x7C) { | |
| 750 return tokenizeBar(next); | |
| 751 } | |
| 752 if (next == 0x5E) { | |
| 753 return tokenizeCaret(next); | |
| 754 } | |
| 755 if (next == 0x5B) { | |
| 756 return tokenizeOpenSquareBracket(next); | |
| 757 } | |
| 758 if (next == 0x7E) { | |
| 759 return tokenizeTilde(next); | |
| 760 } | |
| 761 if (next == 0x5C) { | |
| 762 appendToken2(TokenType.BACKSLASH); | |
| 763 return _reader.advance(); | |
| 764 } | |
| 765 if (next == 0x23) { | |
| 766 return tokenizeTag(next); | |
| 767 } | |
| 768 if (next == 0x28) { | |
| 769 appendBeginToken(TokenType.OPEN_PAREN); | |
| 770 return _reader.advance(); | |
| 771 } | |
| 772 if (next == 0x29) { | |
| 773 appendEndToken(TokenType.CLOSE_PAREN, TokenType.OPEN_PAREN); | |
| 774 return _reader.advance(); | |
| 775 } | |
| 776 if (next == 0x2C) { | |
| 777 appendToken2(TokenType.COMMA); | |
| 778 return _reader.advance(); | |
| 779 } | |
| 780 if (next == 0x3A) { | |
| 781 appendToken2(TokenType.COLON); | |
| 782 return _reader.advance(); | |
| 783 } | |
| 784 if (next == 0x3B) { | |
| 785 appendToken2(TokenType.SEMICOLON); | |
| 786 return _reader.advance(); | |
| 787 } | |
| 788 if (next == 0x3F) { | |
| 789 appendToken2(TokenType.QUESTION); | |
| 790 return _reader.advance(); | |
| 791 } | |
| 792 if (next == 0x5D) { | |
| 793 appendEndToken(TokenType.CLOSE_SQUARE_BRACKET, TokenType.OPEN_SQUARE_BRACK
ET); | |
| 794 return _reader.advance(); | |
| 795 } | |
| 796 if (next == 0x60) { | |
| 797 appendToken2(TokenType.BACKPING); | |
| 798 return _reader.advance(); | |
| 799 } | |
| 800 if (next == 0x7B) { | |
| 801 appendBeginToken(TokenType.OPEN_CURLY_BRACKET); | |
| 802 return _reader.advance(); | |
| 803 } | |
| 804 if (next == 0x7D) { | |
| 805 appendEndToken(TokenType.CLOSE_CURLY_BRACKET, TokenType.OPEN_CURLY_BRACKET
); | |
| 806 return _reader.advance(); | |
| 807 } | |
| 808 if (next == 0x2F) { | |
| 809 return tokenizeSlashOrComment(next); | |
| 810 } | |
| 811 if (next == 0x40) { | |
| 812 appendToken2(TokenType.AT); | |
| 813 return _reader.advance(); | |
| 814 } | |
| 815 if (next == 0x22 || next == 0x27) { | |
| 816 return tokenizeString(next, _reader.offset, false); | |
| 817 } | |
| 818 if (next == 0x2E) { | |
| 819 return tokenizeDotOrNumber(next); | |
| 820 } | |
| 821 if (next == 0x30) { | |
| 822 return tokenizeHexOrNumber(next); | |
| 823 } | |
| 824 if (0x31 <= next && next <= 0x39) { | |
| 825 return tokenizeNumber(next); | |
| 826 } | |
| 827 if (next == -1) { | |
| 828 return -1; | |
| 829 } | |
| 830 reportError(ScannerErrorCode.ILLEGAL_CHARACTER, [next]); | |
| 831 return _reader.advance(); | |
| 832 } | |
| 833 | |
| 834 /** | |
| 835 * Return the first token in the token stream that was scanned. | |
| 836 * | |
| 837 * @return the first token in the token stream that was scanned | |
| 838 */ | |
| 839 Token firstToken() => _tokens.next; | |
| 840 | |
| 841 /** | |
| 842 * Record the fact that we are at the beginning of a new line in the source. | |
| 843 */ | |
| 844 void recordStartOfLine() { | |
| 845 _lineStarts.add(_reader.offset); | |
| 846 } | |
  /**
   * Append a token that opens a matched group (e.g. '(', '[', '{') and push it
   * onto the grouping stack so its matching end token can be linked later.
   */
  void appendBeginToken(TokenType type) {
    BeginToken token;
    if (_firstComment == null) {
      token = new BeginToken(type, _tokenStart);
    } else {
      // Transfer any pending comments onto the new token.
      token = new BeginTokenWithComment(type, _tokenStart, _firstComment);
      _firstComment = null;
      _lastComment = null;
    }
    _tail = _tail.setNext(token);
    _groupingStack.add(token);
    _stackEnd++;
  }
  /**
   * Append a comment token to the pending-comment list. Comments are not put
   * in the main token stream; they are attached to the next non-comment token.
   */
  void appendCommentToken(TokenType type, String value) {
    if (_firstComment == null) {
      _firstComment = new StringToken(type, value, _tokenStart);
      _lastComment = _firstComment;
    } else {
      _lastComment = _lastComment.setNext(new StringToken(type, value, _tokenStart));
    }
  }
  /**
   * Append a token that closes a matched group. If the top of the grouping
   * stack is a begin token of the expected type, link the pair and pop it.
   */
  void appendEndToken(TokenType type, TokenType beginType) {
    Token token;
    if (_firstComment == null) {
      token = new Token(type, _tokenStart);
    } else {
      token = new TokenWithComment(type, _tokenStart, _firstComment);
      _firstComment = null;
      _lastComment = null;
    }
    _tail = _tail.setNext(token);
    if (_stackEnd >= 0) {
      BeginToken begin = _groupingStack[_stackEnd];
      if (identical(begin.type, beginType)) {
        // Mismatched begin tokens are left on the stack; see appendEofToken.
        begin.endToken = token;
        _groupingStack.removeAt(_stackEnd--);
      }
    }
  }
  /**
   * Append the end-of-file token, attaching any pending comments. Any begin
   * tokens still on the grouping stack at this point are unmatched.
   */
  void appendEofToken() {
    Token eofToken;
    if (_firstComment == null) {
      eofToken = new Token(TokenType.EOF, _reader.offset + 1);
    } else {
      eofToken = new TokenWithComment(TokenType.EOF, _reader.offset + 1, _firstComment);
      _firstComment = null;
      _lastComment = null;
    }
    // The EOF token is made its own successor so clients can keep following
    // 'next' links without encountering null.
    eofToken.setNext(eofToken);
    _tail = _tail.setNext(eofToken);
    if (_stackEnd >= 0) {
      _hasUnmatchedGroups2 = true;
    }
  }
| 901 void appendKeywordToken(Keyword keyword) { | |
| 902 if (_firstComment == null) { | |
| 903 _tail = _tail.setNext(new KeywordToken(keyword, _tokenStart)); | |
| 904 } else { | |
| 905 _tail = _tail.setNext(new KeywordTokenWithComment(keyword, _tokenStart, _f
irstComment)); | |
| 906 _firstComment = null; | |
| 907 _lastComment = null; | |
| 908 } | |
| 909 } | |
  /**
   * Append a string-valued token (identifier, literal, etc.) positioned at the
   * recorded token start, attaching any pending comments.
   */
  void appendStringToken(TokenType type, String value) {
    if (_firstComment == null) {
      _tail = _tail.setNext(new StringToken(type, value, _tokenStart));
    } else {
      _tail = _tail.setNext(new StringTokenWithComment(type, value, _tokenStart, _firstComment));
      _firstComment = null;
      _lastComment = null;
    }
  }
  /**
   * Append a string-valued token whose position is displaced from the recorded
   * token start by the given offset, attaching any pending comments.
   */
  void appendStringToken2(TokenType type, String value, int offset) {
    if (_firstComment == null) {
      _tail = _tail.setNext(new StringToken(type, value, _tokenStart + offset));
    } else {
      _tail = _tail.setNext(new StringTokenWithComment(type, value, _tokenStart + offset, _firstComment));
      _firstComment = null;
      _lastComment = null;
    }
  }
| 928 void appendToken2(TokenType type) { | |
| 929 if (_firstComment == null) { | |
| 930 _tail = _tail.setNext(new Token(type, _tokenStart)); | |
| 931 } else { | |
| 932 _tail = _tail.setNext(new TokenWithComment(type, _tokenStart, _firstCommen
t)); | |
| 933 _firstComment = null; | |
| 934 _lastComment = null; | |
| 935 } | |
| 936 } | |
| 937 void appendToken3(TokenType type, int offset) { | |
| 938 if (_firstComment == null) { | |
| 939 _tail = _tail.setNext(new Token(type, offset)); | |
| 940 } else { | |
| 941 _tail = _tail.setNext(new TokenWithComment(type, offset, _firstComment)); | |
| 942 _firstComment = null; | |
| 943 _lastComment = null; | |
| 944 } | |
| 945 } | |
| 946 void beginToken() { | |
| 947 _tokenStart = _reader.offset; | |
| 948 } | |
| 949 | |
  /**
   * Return the beginning token corresponding to a closing brace that was found while scanning
   * inside a string interpolation expression. Tokens that cannot be matched with the closing brace
   * will be dropped from the stack.
   *
   * @return the token to be paired with the closing brace
   */
  BeginToken findTokenMatchingClosingBraceInInterpolationExpression() {
    while (_stackEnd >= 0) {
      BeginToken begin = _groupingStack[_stackEnd];
      if (identical(begin.type, TokenType.OPEN_CURLY_BRACKET) || identical(begin.type, TokenType.STRING_INTERPOLATION_EXPRESSION)) {
        return begin;
      }
      // Any other begin token (e.g. '(' or '[') can no longer be matched once
      // the interpolation's '}' is reached; drop it and record the mismatch.
      _hasUnmatchedGroups2 = true;
      _groupingStack.removeAt(_stackEnd--);
    }
    return null;
  }
| 968 | |
| 969 /** | |
| 970 * Report an error at the current offset. | |
| 971 * | |
| 972 * @param errorCode the error code indicating the nature of the error | |
| 973 * @param arguments any arguments needed to complete the error message | |
| 974 */ | |
| 975 void reportError(ScannerErrorCode errorCode, List<Object> arguments) { | |
| 976 _errorListener.onError(new AnalysisError.con2(source, _reader.offset, 1, err
orCode, arguments)); | |
| 977 } | |
| 978 int select(int choice, TokenType yesType, TokenType noType) { | |
| 979 int next = _reader.advance(); | |
| 980 if (next == choice) { | |
| 981 appendToken2(yesType); | |
| 982 return _reader.advance(); | |
| 983 } else { | |
| 984 appendToken2(noType); | |
| 985 return next; | |
| 986 } | |
| 987 } | |
  /**
   * If the next character matches [choice], consume it and append a token of
   * [yesType]; otherwise append [noType]. Unlike [select], the token is
   * positioned at the given explicit offset.
   */
  int select2(int choice, TokenType yesType, TokenType noType, int offset) {
    int next = _reader.advance();
    if (next == choice) {
      appendToken3(yesType, offset);
      return _reader.advance();
    } else {
      appendToken3(noType, offset);
      return next;
    }
  }
  /**
   * Scan the tail of an '&' operator: '&&', '&=', or plain '&'.
   */
  int tokenizeAmpersand(int next) {
    next = _reader.advance();
    if (next == 0x26) { // '&&'
      appendToken2(TokenType.AMPERSAND_AMPERSAND);
      return _reader.advance();
    } else if (next == 0x3D) { // '&='
      appendToken2(TokenType.AMPERSAND_EQ);
      return _reader.advance();
    } else {
      appendToken2(TokenType.AMPERSAND);
      return next;
    }
  }
  /**
   * Scan the tail of a '|' operator: '||', '|=', or plain '|'.
   */
  int tokenizeBar(int next) {
    next = _reader.advance();
    if (next == 0x7C) { // '||'
      appendToken2(TokenType.BAR_BAR);
      return _reader.advance();
    } else if (next == 0x3D) { // '|='
      appendToken2(TokenType.BAR_EQ);
      return _reader.advance();
    } else {
      appendToken2(TokenType.BAR);
      return next;
    }
  }
| 1024 int tokenizeCaret(int next) => select(0x3D, TokenType.CARET_EQ, TokenType.CARE
T); | |
  /**
   * Scan a token that begins with '.': a fractional number such as '.5',
   * a '..' or '...' operator, or a plain '.'.
   */
  int tokenizeDotOrNumber(int next) {
    int start = _reader.offset;
    next = _reader.advance();
    if (0x30 <= next && next <= 0x39) { // '.' followed by a digit: a double
      return tokenizeFractionPart(next, start);
    } else if (0x2E == next) { // '..' or '...'
      return select(0x2E, TokenType.PERIOD_PERIOD_PERIOD, TokenType.PERIOD_PERIOD);
    } else {
      appendToken2(TokenType.PERIOD);
      return next;
    }
  }
  /**
   * Scan the tail of '=': '==', '=>' (function arrow), or plain '='.
   */
  int tokenizeEquals(int next) {
    next = _reader.advance();
    if (next == 0x3D) { // '=='
      appendToken2(TokenType.EQ_EQ);
      return _reader.advance();
    } else if (next == 0x3E) { // '=>'
      appendToken2(TokenType.FUNCTION);
      return _reader.advance();
    }
    appendToken2(TokenType.EQ);
    return next;
  }
| 1049 int tokenizeExclamation(int next) { | |
| 1050 next = _reader.advance(); | |
| 1051 if (next == 0x3D) { | |
| 1052 appendToken2(TokenType.BANG_EQ); | |
| 1053 return _reader.advance(); | |
| 1054 } | |
| 1055 appendToken2(TokenType.BANG); | |
| 1056 return next; | |
| 1057 } | |
  /**
   * Scan the exponent of a numeric literal (the 'e'/'E' has been consumed):
   * an optional sign followed by one or more digits. Reports MISSING_DIGIT
   * when no digits are present.
   */
  int tokenizeExponent(int next) {
    if (next == 0x2B || next == 0x2D) { // optional '+' or '-'
      next = _reader.advance();
    }
    bool hasDigits = false;
    while (true) {
      if (0x30 <= next && next <= 0x39) {
        hasDigits = true;
      } else {
        if (!hasDigits) {
          reportError(ScannerErrorCode.MISSING_DIGIT, []);
        }
        return next;
      }
      next = _reader.advance();
    }
  }
  /**
   * Scan the fractional part of a numeric literal, positioned just after the
   * '.'. If no digit follows the '.', the text scanned so far is an INT
   * followed by a period token, and scanning resumes via [bigSwitch].
   */
  int tokenizeFractionPart(int next, int start) {
    bool done = false;
    bool hasDigit = false;
    LOOP: while (!done) {
      if (0x30 <= next && next <= 0x39) {
        hasDigit = true;
      } else if (0x65 == next || 0x45 == next) { // 'e'/'E': exponent follows
        hasDigit = true;
        next = tokenizeExponent(_reader.advance());
        done = true;
        continue LOOP;
      } else {
        done = true;
        continue LOOP;
      }
      next = _reader.advance();
    }
    if (!hasDigit) {
      // The '.' did not start a fraction: emit the integer part (excluding
      // the '.'), then emit the period(s) as separate token(s).
      appendStringToken(TokenType.INT, _reader.getString(start, -2));
      if (0x2E == next) {
        return select2(0x2E, TokenType.PERIOD_PERIOD_PERIOD, TokenType.PERIOD_PERIOD, _reader.offset - 1);
      }
      appendToken3(TokenType.PERIOD, _reader.offset - 1);
      return bigSwitch(next);
    }
    appendStringToken(TokenType.DOUBLE, _reader.getString(start, next < 0 ? 0 : -1));
    return next;
  }
  /**
   * Scan the tail of '>': '>=', '>>=', '>>', or plain '>'.
   */
  int tokenizeGreaterThan(int next) {
    next = _reader.advance();
    if (0x3D == next) { // '>='
      appendToken2(TokenType.GT_EQ);
      return _reader.advance();
    } else if (0x3E == next) { // '>>', possibly '>>='
      next = _reader.advance();
      if (0x3D == next) { // '>>='
        appendToken2(TokenType.GT_GT_EQ);
        return _reader.advance();
      } else {
        appendToken2(TokenType.GT_GT);
        return next;
      }
    } else {
      appendToken2(TokenType.GT);
      return next;
    }
  }
  /**
   * Scan a hexadecimal literal; the reader is positioned on the 'x'/'X' of
   * the '0x' prefix. Reports MISSING_HEX_DIGIT when no hex digits follow.
   */
  int tokenizeHex(int next) {
    int start = _reader.offset - 1; // include the '0' of the '0x' prefix
    bool hasDigits = false;
    while (true) {
      next = _reader.advance();
      if ((0x30 <= next && next <= 0x39) || (0x41 <= next && next <= 0x46) || (0x61 <= next && next <= 0x66)) {
        hasDigits = true;
      } else {
        if (!hasDigits) {
          reportError(ScannerErrorCode.MISSING_HEX_DIGIT, []);
        }
        appendStringToken(TokenType.HEXADECIMAL, _reader.getString(start, next < 0 ? 0 : -1));
        return next;
      }
    }
  }
| 1138 int tokenizeHexOrNumber(int next) { | |
| 1139 int x = _reader.peek(); | |
| 1140 if (x == 0x78 || x == 0x58) { | |
| 1141 _reader.advance(); | |
| 1142 return tokenizeHex(x); | |
| 1143 } | |
| 1144 return tokenizeNumber(next); | |
| 1145 } | |
  /**
   * Scan an identifier: ASCII letters, digits, '_' and (when [allowDollar] is
   * true) '$'.
   */
  int tokenizeIdentifier(int next, int start, bool allowDollar) {
    while ((0x61 <= next && next <= 0x7A) || (0x41 <= next && next <= 0x5A) || (0x30 <= next && next <= 0x39) || next == 0x5F || (next == 0x24 && allowDollar)) {
      next = _reader.advance();
    }
    appendStringToken(TokenType.IDENTIFIER, _reader.getString(start, next < 0 ? 0 : -1));
    return next;
  }
  /**
   * Scan a '${...}' interpolation inside a string literal. Nested tokens are
   * produced via [bigSwitch]; the matching '}' is identified through the
   * grouping stack so nested braces within the expression are handled.
   */
  int tokenizeInterpolatedExpression(int next, int start) {
    appendBeginToken(TokenType.STRING_INTERPOLATION_EXPRESSION);
    next = _reader.advance();
    while (next != -1) {
      if (next == 0x7D) { // '}'
        BeginToken begin = findTokenMatchingClosingBraceInInterpolationExpression();
        if (begin == null) {
          // Unbalanced '}': emit it as a plain token and resume the string.
          beginToken();
          appendToken2(TokenType.CLOSE_CURLY_BRACKET);
          next = _reader.advance();
          beginToken();
          return next;
        } else if (identical(begin.type, TokenType.OPEN_CURLY_BRACKET)) {
          // Closes a nested block within the expression; keep scanning.
          beginToken();
          appendEndToken(TokenType.CLOSE_CURLY_BRACKET, TokenType.OPEN_CURLY_BRACKET);
          next = _reader.advance();
          beginToken();
        } else if (identical(begin.type, TokenType.STRING_INTERPOLATION_EXPRESSION)) {
          // Closes the interpolation itself; return to string scanning.
          beginToken();
          appendEndToken(TokenType.CLOSE_CURLY_BRACKET, TokenType.STRING_INTERPOLATION_EXPRESSION);
          next = _reader.advance();
          beginToken();
          return next;
        }
      } else {
        next = bigSwitch(next);
      }
    }
    // NOTE(review): the loop above only exits when next == -1 (other paths
    // return inside it), so the code below the 'if' looks unreachable.
    if (next == -1) {
      return next;
    }
    next = _reader.advance();
    beginToken();
    return next;
  }
  /**
   * Scan a '$identifier' interpolation; the '$' has already been consumed.
   * Only a simple identifier (letters and '_', no '$') may follow.
   */
  int tokenizeInterpolatedIdentifier(int next, int start) {
    appendStringToken2(TokenType.STRING_INTERPOLATION_IDENTIFIER, "\$", 0);
    if ((0x41 <= next && next <= 0x5A) || (0x61 <= next && next <= 0x7A) || next == 0x5F) {
      beginToken();
      next = tokenizeKeywordOrIdentifier(next, false);
    }
    beginToken();
    return next;
  }
  /**
   * Scan a word starting with a lowercase letter, using the keyword state
   * machine to decide between a keyword token and an ordinary identifier.
   */
  int tokenizeKeywordOrIdentifier(int next, bool allowDollar) {
    KeywordState state = KeywordState.KEYWORD_STATE;
    int start = _reader.offset;
    while (state != null && 0x61 <= next && next <= 0x7A) {
      state = state.next(next as int);
      next = _reader.advance();
    }
    if (state == null || state.keyword() == null) {
      return tokenizeIdentifier(next, start, allowDollar);
    }
    // A keyword followed immediately by an identifier character is really a
    // longer identifier (e.g. 'form' after having matched 'for').
    if ((0x41 <= next && next <= 0x5A) || (0x30 <= next && next <= 0x39) || next == 0x5F || next == 0x24) {
      return tokenizeIdentifier(next, start, allowDollar);
    } else if (next < 128) {
      appendKeywordToken(state.keyword());
      return next;
    } else {
      // Non-ASCII character follows; fall back to identifier handling.
      return tokenizeIdentifier(next, start, allowDollar);
    }
  }
  /**
   * Scan the tail of '<': '<=', '<<', '<<=', or plain '<'.
   */
  int tokenizeLessThan(int next) {
    next = _reader.advance();
    if (0x3D == next) { // '<='
      appendToken2(TokenType.LT_EQ);
      return _reader.advance();
    } else if (0x3C == next) { // '<<' or '<<='
      return select(0x3D, TokenType.LT_LT_EQ, TokenType.LT_LT);
    } else {
      appendToken2(TokenType.LT);
      return next;
    }
  }
  /**
   * Scan the tail of '-': '--', '-=', or plain '-'.
   */
  int tokenizeMinus(int next) {
    next = _reader.advance();
    if (next == 0x2D) { // '--'
      appendToken2(TokenType.MINUS_MINUS);
      return _reader.advance();
    } else if (next == 0x3D) { // '-='
      appendToken2(TokenType.MINUS_EQ);
      return _reader.advance();
    } else {
      appendToken2(TokenType.MINUS);
      return next;
    }
  }
  /**
   * Scan a '/*' comment. Dart multi-line comments nest, so a nesting counter
   * is maintained. Line starts inside the comment are still recorded.
   */
  int tokenizeMultiLineComment(int next) {
    int nesting = 1;
    next = _reader.advance();
    while (true) {
      if (-1 == next) {
        reportError(ScannerErrorCode.UNTERMINATED_MULTI_LINE_COMMENT, []);
        appendCommentToken(TokenType.MULTI_LINE_COMMENT, _reader.getString(_tokenStart, 0));
        return next;
      } else if (0x2A == next) { // '*', possibly closing '*/'
        next = _reader.advance();
        if (0x2F == next) {
          --nesting;
          if (0 == nesting) {
            appendCommentToken(TokenType.MULTI_LINE_COMMENT, _reader.getString(_tokenStart, 0));
            return _reader.advance();
          } else {
            next = _reader.advance();
          }
        }
      } else if (0x2F == next) { // '/', possibly opening a nested '/*'
        next = _reader.advance();
        if (0x2A == next) {
          next = _reader.advance();
          ++nesting;
        }
      } else if (next == 0xD) { // CR, optionally followed by LF
        next = _reader.advance();
        if (next == 0xA) {
          next = _reader.advance();
        }
        recordStartOfLine();
      } else if (next == 0xA) { // LF
        recordStartOfLine();
        next = _reader.advance();
      } else {
        next = _reader.advance();
      }
    }
  }
  /**
   * Scan a multi-line raw string; neither escapes nor interpolation are
   * recognized. The string ends at a run of three quote characters.
   */
  int tokenizeMultiLineRawString(int quoteChar, int start) {
    int next = _reader.advance();
    outer: while (next != -1) {
      while (next != quoteChar) {
        next = _reader.advance();
        if (next == -1) {
          break outer;
        } else if (next == 0xD) { // CR, optionally followed by LF
          next = _reader.advance();
          if (next == 0xA) {
            next = _reader.advance();
          }
          recordStartOfLine();
        } else if (next == 0xA) { // LF
          recordStartOfLine();
          next = _reader.advance();
        }
      }
      next = _reader.advance();
      if (next == quoteChar) {
        next = _reader.advance();
        if (next == quoteChar) {
          // Three consecutive quotes terminate the string.
          appendStringToken(TokenType.STRING, _reader.getString(start, 0));
          return _reader.advance();
        }
      }
    }
    reportError(ScannerErrorCode.UNTERMINATED_STRING_LITERAL, []);
    appendStringToken(TokenType.STRING, _reader.getString(start, 0));
    return _reader.advance();
  }
  /**
   * Scan a multi-line (triple-quoted) string, handling escapes, '$'
   * interpolation, and line tracking. Raw strings are delegated to
   * [tokenizeMultiLineRawString].
   */
  int tokenizeMultiLineString(int quoteChar, int start, bool raw) {
    if (raw) {
      return tokenizeMultiLineRawString(quoteChar, start);
    }
    int next = _reader.advance();
    while (next != -1) {
      if (next == 0x24) { // '$': interpolation splits the string token
        appendStringToken(TokenType.STRING, _reader.getString(start, -1));
        beginToken();
        next = tokenizeStringInterpolation(start);
        start = _reader.offset;
        continue;
      }
      if (next == quoteChar) {
        next = _reader.advance();
        if (next == quoteChar) {
          next = _reader.advance();
          if (next == quoteChar) {
            // Three consecutive quotes terminate the string.
            appendStringToken(TokenType.STRING, _reader.getString(start, 0));
            return _reader.advance();
          }
        }
        continue;
      }
      if (next == 0x5C) { // backslash: an escape sequence
        next = _reader.advance();
        if (next == -1) {
          break;
        }
        // A backslash may not be followed directly by a line terminator.
        bool missingCharacter = false;
        if (next == 0xD) {
          missingCharacter = true;
          next = _reader.advance();
          if (next == 0xA) {
            next = _reader.advance();
          }
          recordStartOfLine();
        } else if (next == 0xA) {
          missingCharacter = true;
          recordStartOfLine();
          next = _reader.advance();
        } else {
          next = _reader.advance();
        }
        if (missingCharacter) {
          _errorListener.onError(new AnalysisError.con2(source, _reader.offset - 1, 1, ScannerErrorCode.CHARACTER_EXPECTED_AFTER_SLASH, []));
        }
      } else if (next == 0xD) { // CR, optionally followed by LF
        next = _reader.advance();
        if (next == 0xA) {
          next = _reader.advance();
        }
        recordStartOfLine();
      } else if (next == 0xA) { // LF
        recordStartOfLine();
        next = _reader.advance();
      } else {
        next = _reader.advance();
      }
    }
    reportError(ScannerErrorCode.UNTERMINATED_STRING_LITERAL, []);
    appendStringToken(TokenType.STRING, _reader.getString(start, 0));
    return _reader.advance();
  }
| 1375 int tokenizeMultiply(int next) => select(0x3D, TokenType.STAR_EQ, TokenType.ST
AR); | |
  /**
   * Scan a decimal numeric literal beginning with the digit [next].
   */
  int tokenizeNumber(int next) {
    int start = _reader.offset;
    while (true) {
      next = _reader.advance();
      if (0x30 <= next && next <= 0x39) {
        continue;
      } else if (next == 0x2E) { // '.': a fractional part follows
        return tokenizeFractionPart(_reader.advance(), start);
      } else if (next == 0x65 || next == 0x45) { // 'e'/'E': an exponent follows
        return tokenizeFractionPart(next, start);
      } else {
        appendStringToken(TokenType.INT, _reader.getString(start, next < 0 ? 0 : -1));
        return next;
      }
    }
  }
  /**
   * Scan the tail of '[': the index operators '[]' / '[]=', or a begin token
   * for an ordinary '['.
   */
  int tokenizeOpenSquareBracket(int next) {
    next = _reader.advance();
    if (next == 0x5D) { // ']' immediately after '[': index operator
      return select(0x3D, TokenType.INDEX_EQ, TokenType.INDEX);
    } else {
      appendBeginToken(TokenType.OPEN_SQUARE_BRACKET);
      return next;
    }
  }
| 1401 int tokenizePercent(int next) => select(0x3D, TokenType.PERCENT_EQ, TokenType.
PERCENT); | |
  /**
   * Scan the tail of '+': '++', '+=', or plain '+'.
   */
  int tokenizePlus(int next) {
    next = _reader.advance();
    if (0x2B == next) { // '++'
      appendToken2(TokenType.PLUS_PLUS);
      return _reader.advance();
    } else if (0x3D == next) { // '+='
      appendToken2(TokenType.PLUS_EQ);
      return _reader.advance();
    } else {
      appendToken2(TokenType.PLUS);
      return next;
    }
  }
  /**
   * Scan a '//' comment through the end of the line; the line terminator is
   * not consumed and is left for the caller to process.
   */
  int tokenizeSingleLineComment(int next) {
    while (true) {
      next = _reader.advance();
      if (-1 == next) {
        appendCommentToken(TokenType.SINGLE_LINE_COMMENT, _reader.getString(_tokenStart, 0));
        return next;
      } else if (0xA == next || 0xD == next) {
        // Exclude the terminator (-1) from the comment's lexeme.
        appendCommentToken(TokenType.SINGLE_LINE_COMMENT, _reader.getString(_tokenStart, -1));
        return next;
      }
    }
  }
  /**
   * Scan a single-line raw string; neither escapes nor interpolation are
   * recognized, and the string may not span a line terminator.
   */
  int tokenizeSingleLineRawString(int next, int quoteChar, int start) {
    next = _reader.advance();
    while (next != -1) {
      if (next == quoteChar) {
        appendStringToken(TokenType.STRING, _reader.getString(start, 0));
        return _reader.advance();
      } else if (next == 0xD || next == 0xA) {
        // A single-line string may not contain a line terminator.
        reportError(ScannerErrorCode.UNTERMINATED_STRING_LITERAL, []);
        appendStringToken(TokenType.STRING, _reader.getString(start, 0));
        return _reader.advance();
      }
      next = _reader.advance();
    }
    reportError(ScannerErrorCode.UNTERMINATED_STRING_LITERAL, []);
    appendStringToken(TokenType.STRING, _reader.getString(start, 0));
    return _reader.advance();
  }
  /**
   * Scan a single-line string, handling escapes and '$' interpolation. A line
   * terminator or end of input before the closing quote is reported as an
   * unterminated string literal.
   */
  int tokenizeSingleLineString(int next, int quoteChar, int start) {
    while (next != quoteChar) {
      if (next == 0x5C) { // backslash: skip over the escaped character
        next = _reader.advance();
      } else if (next == 0x24) { // '$': interpolation splits the string token
        appendStringToken(TokenType.STRING, _reader.getString(start, -1));
        beginToken();
        next = tokenizeStringInterpolation(start);
        start = _reader.offset;
        continue;
      }
      if (next <= 0xD && (next == 0xA || next == 0xD || next == -1)) {
        reportError(ScannerErrorCode.UNTERMINATED_STRING_LITERAL, []);
        appendStringToken(TokenType.STRING, _reader.getString(start, 0));
        return _reader.advance();
      }
      next = _reader.advance();
    }
    appendStringToken(TokenType.STRING, _reader.getString(start, 0));
    return _reader.advance();
  }
| 1465 int tokenizeSlashOrComment(int next) { | |
| 1466 next = _reader.advance(); | |
| 1467 if (0x2A == next) { | |
| 1468 return tokenizeMultiLineComment(next); | |
| 1469 } else if (0x2F == next) { | |
| 1470 return tokenizeSingleLineComment(next); | |
| 1471 } else if (0x3D == next) { | |
| 1472 appendToken2(TokenType.SLASH_EQ); | |
| 1473 return _reader.advance(); | |
| 1474 } else { | |
| 1475 appendToken2(TokenType.SLASH); | |
| 1476 return next; | |
| 1477 } | |
| 1478 } | |
  /**
   * Scan a string literal whose opening quote is [next]. Distinguishes the
   * empty string (two quotes), multi-line strings (three quotes), and raw
   * strings.
   */
  int tokenizeString(int next, int start, bool raw) {
    int quoteChar = next;
    next = _reader.advance();
    if (quoteChar == next) {
      next = _reader.advance();
      if (quoteChar == next) {
        // Three quotes in a row: a multi-line string.
        return tokenizeMultiLineString(quoteChar, start, raw);
      } else {
        // Two quotes in a row: an empty string.
        appendStringToken(TokenType.STRING, _reader.getString(start, -1));
        return next;
      }
    }
    if (raw) {
      return tokenizeSingleLineRawString(next, quoteChar, start);
    } else {
      return tokenizeSingleLineString(next, quoteChar, start);
    }
  }
| 1497 int tokenizeStringInterpolation(int start) { | |
| 1498 beginToken(); | |
| 1499 int next = _reader.advance(); | |
| 1500 if (next == 0x7B) { | |
| 1501 return tokenizeInterpolatedExpression(next, start); | |
| 1502 } else { | |
| 1503 return tokenizeInterpolatedIdentifier(next, start); | |
| 1504 } | |
| 1505 } | |
  /**
   * Scan a token beginning with '#': a '#!' script tag when it occurs at the
   * very first offset of the file, otherwise a HASH token.
   */
  int tokenizeTag(int next) {
    if (_reader.offset == 0) {
      if (_reader.peek() == 0x21) { // '!': a shebang line
        do {
          next = _reader.advance();
        } while (next != 0xA && next != 0xD && next > 0);
        appendStringToken(TokenType.SCRIPT_TAG, _reader.getString(_tokenStart, 0));
        return next;
      }
    }
    appendToken2(TokenType.HASH);
    return _reader.advance();
  }
| 1519 int tokenizeTilde(int next) { | |
| 1520 next = _reader.advance(); | |
| 1521 if (next == 0x2F) { | |
| 1522 return select(0x3D, TokenType.TILDE_SLASH_EQ, TokenType.TILDE_SLASH); | |
| 1523 } else { | |
| 1524 appendToken2(TokenType.TILDE); | |
| 1525 return next; | |
| 1526 } | |
| 1527 } | |
| 1528 } | |
/**
 * Instances of the class `StringToken` represent a token whose value is independent of its
 * type.
 *
 * @coverage dart.engine.parser
 */
class StringToken extends Token {

  /**
   * The (interned) lexeme that this token represents.
   */
  String _value2;

  /**
   * Initialize a newly created token of the given [type] whose lexeme is
   * [value], located at the given [offset] from the beginning of the file.
   */
  StringToken(TokenType type, String value, int offset) : super(type, offset) {
    // Interning lets repeated lexemes (identifiers, literals) share storage.
    this._value2 = StringUtilities.intern(value);
  }

  String get lexeme => _value2;

  String value() => _value2;

  Token copy() => new StringToken(type, _value2, offset);
}
/**
 * Instances of the class `TokenWithComment` represent a normal token that is preceded by
 * comments.
 *
 * @coverage dart.engine.parser
 */
class TokenWithComment extends Token {

  /**
   * The first comment in the list of comments that precede this token.
   */
  Token _precedingComment;

  /**
   * Initialize a newly created token of the given [type] at the given
   * [offset], preceded by the comments reachable from [precedingComment].
   */
  TokenWithComment(TokenType type, int offset, Token precedingComment) : super(type, offset) {
    this._precedingComment = precedingComment;
  }

  Token get precedingComments => _precedingComment;

  Token copy() => new TokenWithComment(type, offset, _precedingComment);
}
/**
 * Instances of the class `Token` represent a token that was scanned from the input. Each
 * token knows which token follows it, acting as the head of a linked list of tokens.
 *
 * @coverage dart.engine.parser
 */
class Token {

  /**
   * The type of the token.
   */
  TokenType type;

  /**
   * The offset from the beginning of the file to the first character in the token.
   */
  int offset = 0;

  /**
   * The previous token in the token stream.
   */
  Token previous;

  /**
   * The next token in the token stream.
   */
  Token next;

  /**
   * Initialize a newly created token to have the given type and offset.
   *
   * @param type the type of the token
   * @param offset the offset from the beginning of the file to the first character in the token
   */
  Token(TokenType type, int offset) {
    this.type = type;
    this.offset = offset;
  }

  /**
   * Return a newly created token that is a copy of this token but that is not a part of any token
   * stream.
   *
   * @return a newly created token that is a copy of this token
   */
  Token copy() => new Token(type, offset);

  /**
   * Return the offset from the beginning of the file to the character after the last character of
   * the token.
   *
   * @return the offset from the beginning of the file to the first character after the last
   *         character of the token
   */
  int get end => offset + length;

  /**
   * Return the number of characters in the token's source range.
   *
   * @return the number of characters in the token's source range
   */
  int get length => lexeme.length;

  /**
   * Return the lexeme that represents this token.
   *
   * @return the lexeme that represents this token
   */
  String get lexeme => type.lexeme;

  /**
   * Return the first comment in the list of comments that precede this token, or `null` if
   * there are no comments preceding this token. Additional comments can be reached by following the
   * token stream using [getNext] until `null` is returned.
   *
   * Subclasses that carry comments (e.g. the `*WithComment` token classes) override this.
   *
   * @return the first comment in the list of comments that precede this token
   */
  Token get precedingComments => null;

  /**
   * Return `true` if this token represents an operator.
   *
   * @return `true` if this token represents an operator
   */
  bool get isOperator => type.isOperator;

  /**
   * Return `true` if this token is a synthetic token. A synthetic token is a token that was
   * introduced by the parser in order to recover from an error in the code. Synthetic tokens always
   * have a length of zero (`0`).
   *
   * @return `true` if this token is a synthetic token
   */
  bool get isSynthetic => length == 0;

  /**
   * Return `true` if this token represents an operator that can be defined by users.
   *
   * @return `true` if this token represents an operator that can be defined by users
   */
  bool get isUserDefinableOperator => type.isUserDefinableOperator;

  /**
   * Set the next token in the token stream to the given token. This has the side-effect of setting
   * this token to be the previous token for the given token.
   *
   * @param token the next token in the token stream
   * @return the token that was passed in
   */
  Token setNext(Token token) {
    next = token;
    token.previous = this;
    return token;
  }

  /**
   * Set the next token in the token stream to the given token without changing which token is the
   * previous token for the given token.
   *
   * @param token the next token in the token stream
   * @return the token that was passed in
   */
  Token setNextWithoutSettingPrevious(Token token) {
    next = token;
    return token;
  }

  String toString() => lexeme;

  /**
   * Return the value of this token. For keyword tokens, this is the keyword associated with the
   * token, for other tokens it is the lexeme associated with the token.
   *
   * @return the value of this token
   */
  Object value() => type.lexeme;

  /**
   * Apply (add) the given delta to this token's offset.
   *
   * @param delta the amount by which the offset is to be adjusted
   */
  void applyDelta(int delta) {
    offset += delta;
  }

  /**
   * Copy a linked list of comment tokens identical to the given comment tokens.
   *
   * @param token the first token in the list, or `null` if there are no tokens to be copied
   * @return the first token in the newly created list
   */
  Token copyComments(Token token) {
    if (token == null) {
      return null;
    }
    Token head = token.copy();
    Token tail = head;
    token = token.next;
    while (token != null) {
      tail = tail.setNext(token.copy());
      // Advance to the next comment token. Without this step the loop never
      // terminated for comment lists with more than one remaining element.
      token = token.next;
    }
    return head;
  }
}
/**
 * The interface `CharacterReader` defines the behavior of objects that provide the scanner
 * with sequential access to the characters of a source, with a settable position.
 */
abstract class CharacterReader {

  /**
   * Advance the current position and return the character at the new current position.
   *
   * @return the character at the new current position
   */
  int advance();

  /**
   * Return the current offset relative to the beginning of the source. Return the initial offset if
   * the scanner has not yet scanned the source code, and one (1) past the end of the source code if
   * the entire source code has been scanned.
   *
   * @return the current offset of the scanner in the source
   */
  int get offset;

  /**
   * Return the substring of the source code between the start offset and the modified current
   * position. The current position is modified by adding the end delta.
   *
   * @param start the offset to the beginning of the string, relative to the start of the file
   * @param endDelta the number of characters after the current location to be included in the
   *          string, or the number of characters before the current location to be excluded if the
   *          offset is negative
   * @return the specified substring of the source code
   */
  String getString(int start, int endDelta);

  /**
   * Return the character at the current position without changing the current position.
   *
   * @return the character at the current position
   */
  int peek();

  /**
   * Set the current offset relative to the beginning of the source. The new offset must be between
   * the initial offset and one (1) past the end of the source code.
   *
   * @param offset the new offset in the source
   */
  void set offset(int offset);
}
/**
 * Instances of the class `BeginTokenWithComment` represent a begin token that is preceded by
 * comments.
 *
 * @coverage dart.engine.parser
 */
class BeginTokenWithComment extends BeginToken {

  /**
   * The first comment in the list of comments that precede this token.
   */
  Token _precedingComment;

  /**
   * Initialize a newly created token to have the given type and offset and to be preceded by the
   * comments reachable from the given comment.
   *
   * @param type the type of the token
   * @param offset the offset from the beginning of the file to the first character in the token
   * @param precedingComment the first comment in the list of comments that precede this token
   */
  BeginTokenWithComment(TokenType type, int offset, Token precedingComment) : super(type, offset) {
    this._precedingComment = precedingComment;
  }

  Token copy() => new BeginTokenWithComment(type, offset, copyComments(_precedingComment));

  Token get precedingComments => _precedingComment;

  void applyDelta(int delta) {
    super.applyDelta(delta);
    // The preceding comments are not part of the main token stream, so they
    // must have the delta applied to each of them individually.
    for (Token comment = _precedingComment; comment != null; comment = comment.next) {
      comment.applyDelta(delta);
    }
  }
}
/**
 * Instances of the class `KeywordToken` represent a keyword in the language.
 *
 * @coverage dart.engine.parser
 */
class KeywordToken extends Token {

  /**
   * The keyword being represented by this token.
   */
  Keyword keyword;

  /**
   * Initialize a newly created token to represent the given keyword.
   *
   * @param keyword the keyword being represented by this token
   * @param offset the offset from the beginning of the file to the first character in the token
   */
  KeywordToken(this.keyword, int offset) : super(TokenType.KEYWORD, offset);

  Token copy() => new KeywordToken(keyword, offset);

  // The lexeme of a keyword token is the keyword's own source syntax.
  String get lexeme => keyword.syntax;

  // The value of a keyword token is the [Keyword] itself, not the lexeme.
  Keyword value() => keyword;
}
/**
 * Instances of the class `BeginToken` represent the opening half of a grouping pair of
 * tokens. This is used for curly brackets ('{'), parentheses ('('), and square brackets ('[').
 *
 * @coverage dart.engine.parser
 */
class BeginToken extends Token {

  /**
   * The token that corresponds to this token, presumably the matching closing
   * token of the pair; `null` until it has been set.
   */
  Token endToken;

  /**
   * Initialize a newly created token representing the opening half of a grouping pair of tokens.
   *
   * @param type the type of the token
   * @param offset the offset from the beginning of the file to the first character in the token
   */
  BeginToken(TokenType type, int offset) : super(type, offset) {
    assert(_isGroupingType(type));
  }

  Token copy() => new BeginToken(type, offset);

  /**
   * Return `true` if the given type is one of the token types allowed for a begin token.
   */
  static bool _isGroupingType(TokenType type) => identical(type, TokenType.OPEN_CURLY_BRACKET)
      || identical(type, TokenType.OPEN_PAREN)
      || identical(type, TokenType.OPEN_SQUARE_BRACKET)
      || identical(type, TokenType.STRING_INTERPOLATION_EXPRESSION);
}
/**
 * The enumeration `TokenClass` represents classes (or groups) of tokens with a similar use.
 *
 * Note: the ordinal passed to each constructor must match the constant's position in
 * [values], and the second argument to `con2` is the operator precedence for that class.
 *
 * @coverage dart.engine.parser
 */
class TokenClass extends Enum<TokenClass> {

  /**
   * A value used to indicate that the token type is not part of any specific class of token.
   */
  static final TokenClass NO_CLASS = new TokenClass.con1('NO_CLASS', 0);

  /**
   * A value used to indicate that the token type is an additive operator.
   */
  static final TokenClass ADDITIVE_OPERATOR = new TokenClass.con2('ADDITIVE_OPERATOR', 1, 12);

  /**
   * A value used to indicate that the token type is an assignment operator.
   */
  static final TokenClass ASSIGNMENT_OPERATOR = new TokenClass.con2('ASSIGNMENT_OPERATOR', 2, 1);

  /**
   * A value used to indicate that the token type is a bitwise-and operator.
   */
  static final TokenClass BITWISE_AND_OPERATOR = new TokenClass.con2('BITWISE_AND_OPERATOR', 3, 8);

  /**
   * A value used to indicate that the token type is a bitwise-or operator.
   */
  static final TokenClass BITWISE_OR_OPERATOR = new TokenClass.con2('BITWISE_OR_OPERATOR', 4, 6);

  /**
   * A value used to indicate that the token type is a bitwise-xor operator.
   */
  static final TokenClass BITWISE_XOR_OPERATOR = new TokenClass.con2('BITWISE_XOR_OPERATOR', 5, 7);

  /**
   * A value used to indicate that the token type is a cascade operator.
   */
  static final TokenClass CASCADE_OPERATOR = new TokenClass.con2('CASCADE_OPERATOR', 6, 2);

  /**
   * A value used to indicate that the token type is a conditional operator.
   */
  static final TokenClass CONDITIONAL_OPERATOR = new TokenClass.con2('CONDITIONAL_OPERATOR', 7, 3);

  /**
   * A value used to indicate that the token type is an equality operator.
   */
  static final TokenClass EQUALITY_OPERATOR = new TokenClass.con2('EQUALITY_OPERATOR', 8, 9);

  /**
   * A value used to indicate that the token type is a logical-and operator.
   */
  static final TokenClass LOGICAL_AND_OPERATOR = new TokenClass.con2('LOGICAL_AND_OPERATOR', 9, 5);

  /**
   * A value used to indicate that the token type is a logical-or operator.
   */
  static final TokenClass LOGICAL_OR_OPERATOR = new TokenClass.con2('LOGICAL_OR_OPERATOR', 10, 4);

  /**
   * A value used to indicate that the token type is a multiplicative operator.
   */
  static final TokenClass MULTIPLICATIVE_OPERATOR = new TokenClass.con2('MULTIPLICATIVE_OPERATOR', 11, 13);

  /**
   * A value used to indicate that the token type is a relational operator.
   */
  static final TokenClass RELATIONAL_OPERATOR = new TokenClass.con2('RELATIONAL_OPERATOR', 12, 10);

  /**
   * A value used to indicate that the token type is a shift operator.
   */
  static final TokenClass SHIFT_OPERATOR = new TokenClass.con2('SHIFT_OPERATOR', 13, 11);

  /**
   * A value used to indicate that the token type is a unary postfix operator.
   */
  static final TokenClass UNARY_POSTFIX_OPERATOR = new TokenClass.con2('UNARY_POSTFIX_OPERATOR', 14, 15);

  /**
   * A value used to indicate that the token type is a unary prefix operator.
   */
  static final TokenClass UNARY_PREFIX_OPERATOR = new TokenClass.con2('UNARY_PREFIX_OPERATOR', 15, 14);
  static final List<TokenClass> values = [
      NO_CLASS,
      ADDITIVE_OPERATOR,
      ASSIGNMENT_OPERATOR,
      BITWISE_AND_OPERATOR,
      BITWISE_OR_OPERATOR,
      BITWISE_XOR_OPERATOR,
      CASCADE_OPERATOR,
      CONDITIONAL_OPERATOR,
      EQUALITY_OPERATOR,
      LOGICAL_AND_OPERATOR,
      LOGICAL_OR_OPERATOR,
      MULTIPLICATIVE_OPERATOR,
      RELATIONAL_OPERATOR,
      SHIFT_OPERATOR,
      UNARY_POSTFIX_OPERATOR,
      UNARY_PREFIX_OPERATOR];

  /**
   * The precedence of tokens of this class, or `0` if such tokens do not represent an
   * operator. Judging from the table above, larger values bind more tightly
   * (multiplicative 13 > additive 12 > ... > assignment 1).
   */
  int precedence = 0;

  /**
   * Create a token class with no associated operator precedence.
   */
  TokenClass.con1(String name, int ordinal) : this.con2(name, ordinal, 0);

  /**
   * Create a token class with the given name, ordinal, and operator precedence.
   */
  TokenClass.con2(String name, int ordinal, int precedence) : super(name, ordinal) {
    this.precedence = precedence;
  }
}
/**
 * Instances of the class `KeywordTokenWithComment` implement a keyword token that is preceded
 * by comments.
 *
 * @coverage dart.engine.parser
 */
class KeywordTokenWithComment extends KeywordToken {

  /**
   * The first comment in the list of comments that precede this token.
   */
  Token _precedingComment;

  /**
   * Initialize a newly created token to represent the given keyword and to be preceded by the
   * comments reachable from the given comment.
   *
   * @param keyword the keyword being represented by this token
   * @param offset the offset from the beginning of the file to the first character in the token
   * @param precedingComment the first comment in the list of comments that precede this token
   */
  KeywordTokenWithComment(Keyword keyword, int offset, Token precedingComment) : super(keyword, offset) {
    this._precedingComment = precedingComment;
  }

  Token copy() => new KeywordTokenWithComment(keyword, offset, copyComments(_precedingComment));

  Token get precedingComments => _precedingComment;

  void applyDelta(int delta) {
    super.applyDelta(delta);
    // The preceding comments are not part of the main token stream, so they
    // must have the delta applied to each of them individually.
    for (Token comment = _precedingComment; comment != null; comment = comment.next) {
      comment.applyDelta(delta);
    }
  }
}
/**
 * The enumeration `TokenType` defines the types of tokens that can be returned by the
 * scanner.
 *
 * Note: the ordinal passed to each constructor must match the constant's position in
 * [values].
 *
 * @coverage dart.engine.parser
 */
class TokenType extends Enum<TokenType> {

  /**
   * The type of the token that marks the end of the input.
   */
  static final TokenType EOF = new TokenType_EOF('EOF', 0, null, "");

  // Token types created with `con1` have no fixed lexeme (the lexeme varies per token).
  static final TokenType DOUBLE = new TokenType.con1('DOUBLE', 1);
  static final TokenType HEXADECIMAL = new TokenType.con1('HEXADECIMAL', 2);
  static final TokenType IDENTIFIER = new TokenType.con1('IDENTIFIER', 3);
  static final TokenType INT = new TokenType.con1('INT', 4);
  static final TokenType KEYWORD = new TokenType.con1('KEYWORD', 5);
  static final TokenType MULTI_LINE_COMMENT = new TokenType.con1('MULTI_LINE_COMMENT', 6);
  static final TokenType SCRIPT_TAG = new TokenType.con1('SCRIPT_TAG', 7);
  static final TokenType SINGLE_LINE_COMMENT = new TokenType.con1('SINGLE_LINE_COMMENT', 8);
  static final TokenType STRING = new TokenType.con1('STRING', 9);

  // Token types created with `con2` have a fixed lexeme and, where non-null, a token class
  // that determines their operator precedence.
  static final TokenType AMPERSAND = new TokenType.con2('AMPERSAND', 10, TokenClass.BITWISE_AND_OPERATOR, "&");
  static final TokenType AMPERSAND_AMPERSAND = new TokenType.con2('AMPERSAND_AMPERSAND', 11, TokenClass.LOGICAL_AND_OPERATOR, "&&");
  static final TokenType AMPERSAND_EQ = new TokenType.con2('AMPERSAND_EQ', 12, TokenClass.ASSIGNMENT_OPERATOR, "&=");
  static final TokenType AT = new TokenType.con2('AT', 13, null, "@");
  static final TokenType BANG = new TokenType.con2('BANG', 14, TokenClass.UNARY_PREFIX_OPERATOR, "!");
  static final TokenType BANG_EQ = new TokenType.con2('BANG_EQ', 15, TokenClass.EQUALITY_OPERATOR, "!=");
  static final TokenType BAR = new TokenType.con2('BAR', 16, TokenClass.BITWISE_OR_OPERATOR, "|");
  static final TokenType BAR_BAR = new TokenType.con2('BAR_BAR', 17, TokenClass.LOGICAL_OR_OPERATOR, "||");
  static final TokenType BAR_EQ = new TokenType.con2('BAR_EQ', 18, TokenClass.ASSIGNMENT_OPERATOR, "|=");
  static final TokenType COLON = new TokenType.con2('COLON', 19, null, ":");
  static final TokenType COMMA = new TokenType.con2('COMMA', 20, null, ",");
  static final TokenType CARET = new TokenType.con2('CARET', 21, TokenClass.BITWISE_XOR_OPERATOR, "^");
  static final TokenType CARET_EQ = new TokenType.con2('CARET_EQ', 22, TokenClass.ASSIGNMENT_OPERATOR, "^=");
  static final TokenType CLOSE_CURLY_BRACKET = new TokenType.con2('CLOSE_CURLY_BRACKET', 23, null, "}");
  static final TokenType CLOSE_PAREN = new TokenType.con2('CLOSE_PAREN', 24, null, ")");
  static final TokenType CLOSE_SQUARE_BRACKET = new TokenType.con2('CLOSE_SQUARE_BRACKET', 25, null, "]");
  static final TokenType EQ = new TokenType.con2('EQ', 26, TokenClass.ASSIGNMENT_OPERATOR, "=");
  static final TokenType EQ_EQ = new TokenType.con2('EQ_EQ', 27, TokenClass.EQUALITY_OPERATOR, "==");
  static final TokenType FUNCTION = new TokenType.con2('FUNCTION', 28, null, "=>");
  static final TokenType GT = new TokenType.con2('GT', 29, TokenClass.RELATIONAL_OPERATOR, ">");
  static final TokenType GT_EQ = new TokenType.con2('GT_EQ', 30, TokenClass.RELATIONAL_OPERATOR, ">=");
  static final TokenType GT_GT = new TokenType.con2('GT_GT', 31, TokenClass.SHIFT_OPERATOR, ">>");
  static final TokenType GT_GT_EQ = new TokenType.con2('GT_GT_EQ', 32, TokenClass.ASSIGNMENT_OPERATOR, ">>=");
  static final TokenType HASH = new TokenType.con2('HASH', 33, null, "#");
  static final TokenType INDEX = new TokenType.con2('INDEX', 34, TokenClass.UNARY_POSTFIX_OPERATOR, "[]");
  static final TokenType INDEX_EQ = new TokenType.con2('INDEX_EQ', 35, TokenClass.UNARY_POSTFIX_OPERATOR, "[]=");
  static final TokenType IS = new TokenType.con2('IS', 36, TokenClass.RELATIONAL_OPERATOR, "is");
  static final TokenType LT = new TokenType.con2('LT', 37, TokenClass.RELATIONAL_OPERATOR, "<");
  static final TokenType LT_EQ = new TokenType.con2('LT_EQ', 38, TokenClass.RELATIONAL_OPERATOR, "<=");
  static final TokenType LT_LT = new TokenType.con2('LT_LT', 39, TokenClass.SHIFT_OPERATOR, "<<");
  static final TokenType LT_LT_EQ = new TokenType.con2('LT_LT_EQ', 40, TokenClass.ASSIGNMENT_OPERATOR, "<<=");
  static final TokenType MINUS = new TokenType.con2('MINUS', 41, TokenClass.ADDITIVE_OPERATOR, "-");
  static final TokenType MINUS_EQ = new TokenType.con2('MINUS_EQ', 42, TokenClass.ASSIGNMENT_OPERATOR, "-=");
  static final TokenType MINUS_MINUS = new TokenType.con2('MINUS_MINUS', 43, TokenClass.UNARY_PREFIX_OPERATOR, "--");
  static final TokenType OPEN_CURLY_BRACKET = new TokenType.con2('OPEN_CURLY_BRACKET', 44, null, "{");
  static final TokenType OPEN_PAREN = new TokenType.con2('OPEN_PAREN', 45, TokenClass.UNARY_POSTFIX_OPERATOR, "(");
  static final TokenType OPEN_SQUARE_BRACKET = new TokenType.con2('OPEN_SQUARE_BRACKET', 46, TokenClass.UNARY_POSTFIX_OPERATOR, "[");
  static final TokenType PERCENT = new TokenType.con2('PERCENT', 47, TokenClass.MULTIPLICATIVE_OPERATOR, "%");
  static final TokenType PERCENT_EQ = new TokenType.con2('PERCENT_EQ', 48, TokenClass.ASSIGNMENT_OPERATOR, "%=");
  static final TokenType PERIOD = new TokenType.con2('PERIOD', 49, TokenClass.UNARY_POSTFIX_OPERATOR, ".");
  static final TokenType PERIOD_PERIOD = new TokenType.con2('PERIOD_PERIOD', 50, TokenClass.CASCADE_OPERATOR, "..");
  static final TokenType PLUS = new TokenType.con2('PLUS', 51, TokenClass.ADDITIVE_OPERATOR, "+");
  static final TokenType PLUS_EQ = new TokenType.con2('PLUS_EQ', 52, TokenClass.ASSIGNMENT_OPERATOR, "+=");
  static final TokenType PLUS_PLUS = new TokenType.con2('PLUS_PLUS', 53, TokenClass.UNARY_PREFIX_OPERATOR, "++");
  static final TokenType QUESTION = new TokenType.con2('QUESTION', 54, TokenClass.CONDITIONAL_OPERATOR, "?");
  static final TokenType SEMICOLON = new TokenType.con2('SEMICOLON', 55, null, ";");
  static final TokenType SLASH = new TokenType.con2('SLASH', 56, TokenClass.MULTIPLICATIVE_OPERATOR, "/");
  static final TokenType SLASH_EQ = new TokenType.con2('SLASH_EQ', 57, TokenClass.ASSIGNMENT_OPERATOR, "/=");
  static final TokenType STAR = new TokenType.con2('STAR', 58, TokenClass.MULTIPLICATIVE_OPERATOR, "*");
  static final TokenType STAR_EQ = new TokenType.con2('STAR_EQ', 59, TokenClass.ASSIGNMENT_OPERATOR, "*=");
  static final TokenType STRING_INTERPOLATION_EXPRESSION = new TokenType.con2('STRING_INTERPOLATION_EXPRESSION', 60, null, "\${");
  static final TokenType STRING_INTERPOLATION_IDENTIFIER = new TokenType.con2('STRING_INTERPOLATION_IDENTIFIER', 61, null, "\$");
  static final TokenType TILDE = new TokenType.con2('TILDE', 62, TokenClass.UNARY_PREFIX_OPERATOR, "~");
  static final TokenType TILDE_SLASH = new TokenType.con2('TILDE_SLASH', 63, TokenClass.MULTIPLICATIVE_OPERATOR, "~/");
  static final TokenType TILDE_SLASH_EQ = new TokenType.con2('TILDE_SLASH_EQ', 64, TokenClass.ASSIGNMENT_OPERATOR, "~/=");
  static final TokenType BACKPING = new TokenType.con2('BACKPING', 65, null, "`");
  static final TokenType BACKSLASH = new TokenType.con2('BACKSLASH', 66, null, "\\");
  static final TokenType PERIOD_PERIOD_PERIOD = new TokenType.con2('PERIOD_PERIOD_PERIOD', 67, null, "...");
  static final List<TokenType> values = [
      EOF,
      DOUBLE,
      HEXADECIMAL,
      IDENTIFIER,
      INT,
      KEYWORD,
      MULTI_LINE_COMMENT,
      SCRIPT_TAG,
      SINGLE_LINE_COMMENT,
      STRING,
      AMPERSAND,
      AMPERSAND_AMPERSAND,
      AMPERSAND_EQ,
      AT,
      BANG,
      BANG_EQ,
      BAR,
      BAR_BAR,
      BAR_EQ,
      COLON,
      COMMA,
      CARET,
      CARET_EQ,
      CLOSE_CURLY_BRACKET,
      CLOSE_PAREN,
      CLOSE_SQUARE_BRACKET,
      EQ,
      EQ_EQ,
      FUNCTION,
      GT,
      GT_EQ,
      GT_GT,
      GT_GT_EQ,
      HASH,
      INDEX,
      INDEX_EQ,
      IS,
      LT,
      LT_EQ,
      LT_LT,
      LT_LT_EQ,
      MINUS,
      MINUS_EQ,
      MINUS_MINUS,
      OPEN_CURLY_BRACKET,
      OPEN_PAREN,
      OPEN_SQUARE_BRACKET,
      PERCENT,
      PERCENT_EQ,
      PERIOD,
      PERIOD_PERIOD,
      PLUS,
      PLUS_EQ,
      PLUS_PLUS,
      QUESTION,
      SEMICOLON,
      SLASH,
      SLASH_EQ,
      STAR,
      STAR_EQ,
      STRING_INTERPOLATION_EXPRESSION,
      STRING_INTERPOLATION_IDENTIFIER,
      TILDE,
      TILDE_SLASH,
      TILDE_SLASH_EQ,
      BACKPING,
      BACKSLASH,
      PERIOD_PERIOD_PERIOD];

  /**
   * The class of the token.
   */
  TokenClass _tokenClass;

  /**
   * The lexeme that defines this type of token, or `null` if there is more than one possible
   * lexeme for this type of token.
   */
  String lexeme;

  /**
   * Create a token type with no fixed lexeme and no token class.
   */
  TokenType.con1(String name, int ordinal) : this.con2(name, ordinal, TokenClass.NO_CLASS, null);

  /**
   * Create a token type with the given token class (a `null` class is normalized to
   * [TokenClass.NO_CLASS]) and lexeme.
   */
  TokenType.con2(String name, int ordinal, TokenClass tokenClass, String lexeme) : super(name, ordinal) {
    this._tokenClass = tokenClass == null ? TokenClass.NO_CLASS : tokenClass;
    this.lexeme = lexeme;
  }

  /**
   * Return the precedence of the token, or `0` if the token does not represent an operator.
   *
   * @return the precedence of the token
   */
  int get precedence => _tokenClass.precedence;

  /**
   * Return `true` if this type of token represents an additive operator.
   *
   * @return `true` if this type of token represents an additive operator
   */
  bool get isAdditiveOperator => identical(_tokenClass, TokenClass.ADDITIVE_OPERATOR);

  /**
   * Return `true` if this type of token represents an assignment operator.
   *
   * @return `true` if this type of token represents an assignment operator
   */
  bool get isAssignmentOperator => identical(_tokenClass, TokenClass.ASSIGNMENT_OPERATOR);

  /**
   * Return `true` if this type of token represents an associative operator. An associative
   * operator is an operator for which the following equality is true:
   * `(a * b) * c == a * (b * c)`. In other words, if the result of applying the operator to
   * multiple operands does not depend on the order in which those applications occur.
   *
   * Note: This method considers the logical-and and logical-or operators to be associative, even
   * though the order in which the application of those operators can have an effect because
   * evaluation of the right-hand operand is conditional.
   *
   * @return `true` if this type of token represents an associative operator
   */
  bool get isAssociativeOperator => identical(this, AMPERSAND) || identical(this, AMPERSAND_AMPERSAND) || identical(this, BAR) || identical(this, BAR_BAR) || identical(this, CARET) || identical(this, PLUS) || identical(this, STAR);

  /**
   * Return `true` if this type of token represents an equality operator.
   *
   * @return `true` if this type of token represents an equality operator
   */
  bool get isEqualityOperator => identical(_tokenClass, TokenClass.EQUALITY_OPERATOR);

  /**
   * Return `true` if this type of token represents an increment operator.
   *
   * Note: comparing `lexeme` with `identical` relies on the lexemes all being assigned from
   * canonicalized string literals in this class.
   *
   * @return `true` if this type of token represents an increment operator
   */
  bool get isIncrementOperator => identical(lexeme, "++") || identical(lexeme, "--");

  /**
   * Return `true` if this type of token represents a multiplicative operator.
   *
   * @return `true` if this type of token represents a multiplicative operator
   */
  bool get isMultiplicativeOperator => identical(_tokenClass, TokenClass.MULTIPLICATIVE_OPERATOR);

  /**
   * Return `true` if this token type represents an operator.
   *
   * Open parenthesis, open square bracket, and period carry a token class (for precedence)
   * but are deliberately excluded here.
   *
   * @return `true` if this token type represents an operator
   */
  bool get isOperator => _tokenClass != TokenClass.NO_CLASS && this != OPEN_PAREN && this != OPEN_SQUARE_BRACKET && this != PERIOD;

  /**
   * Return `true` if this type of token represents a relational operator.
   *
   * @return `true` if this type of token represents a relational operator
   */
  bool get isRelationalOperator => identical(_tokenClass, TokenClass.RELATIONAL_OPERATOR);

  /**
   * Return `true` if this type of token represents a shift operator.
   *
   * @return `true` if this type of token represents a shift operator
   */
  bool get isShiftOperator => identical(_tokenClass, TokenClass.SHIFT_OPERATOR);

  /**
   * Return `true` if this type of token represents a unary postfix operator.
   *
   * @return `true` if this type of token represents a unary postfix operator
   */
  bool get isUnaryPostfixOperator => identical(_tokenClass, TokenClass.UNARY_POSTFIX_OPERATOR);

  /**
   * Return `true` if this type of token represents a unary prefix operator.
   *
   * @return `true` if this type of token represents a unary prefix operator
   */
  bool get isUnaryPrefixOperator => identical(_tokenClass, TokenClass.UNARY_PREFIX_OPERATOR);

  /**
   * Return `true` if this token type represents an operator that can be defined by users.
   *
   * @return `true` if this token type represents an operator that can be defined by users
   */
  bool get isUserDefinableOperator => identical(lexeme, "==") || identical(lexeme, "~") || identical(lexeme, "[]") || identical(lexeme, "[]=") || identical(lexeme, "*") || identical(lexeme, "/") || identical(lexeme, "%") || identical(lexeme, "~/") || identical(lexeme, "+") || identical(lexeme, "-") || identical(lexeme, "<<") || identical(lexeme, ">>") || identical(lexeme, ">=") || identical(lexeme, ">") || identical(lexeme, "<=") || identical(lexeme, "<") || identical(lexeme, "&") || identical(lexeme, "^") || identical(lexeme, "|");
}
/**
 * The token type of the end-of-file marker; it prints as `-eof-` because its lexeme is empty.
 */
class TokenType_EOF extends TokenType {
  TokenType_EOF(String name, int ordinal, TokenClass tokenClass, String lexeme) : super.con2(name, ordinal, tokenClass, lexeme);

  String toString() => "-eof-";
}
| OLD | NEW |