| OLD | NEW |
| 1 // Copyright (c) 2011, the Dart project authors. Please see the AUTHORS file | 1 // Copyright (c) 2012, the Dart project authors. Please see the AUTHORS file |
| 2 // for details. All rights reserved. Use of this source code is governed by a | 2 // for details. All rights reserved. Use of this source code is governed by a |
| 3 // BSD-style license that can be found in the LICENSE file. | 3 // BSD-style license that can be found in the LICENSE file. |
| 4 // Generated by scripts/tokenizer_gen.py. | 4 // Generated by scripts/tokenizer_gen.py. |
| 5 | 5 |
| 6 | 6 |
| 7 interface TokenSource { | 7 interface TokenSource { |
| 8 Token next(); | 8 Token next(); |
| 9 } | 9 } |
| 10 | 10 |
| 11 class InterpStack { | 11 class InterpStack { |
| (...skipping 148 matching lines...) | |
| 160 | 160 |
| 161 if (_skipWhitespace) { | 161 if (_skipWhitespace) { |
| 162 return next(); | 162 return next(); |
| 163 } else { | 163 } else { |
| 164 return _finishToken(TokenKind.COMMENT); | 164 return _finishToken(TokenKind.COMMENT); |
| 165 } | 165 } |
| 166 } | 166 } |
| 167 | 167 |
| 168 void eatDigits() { | 168 void eatDigits() { |
| 169 while (_index < _text.length) { | 169 while (_index < _text.length) { |
| 170 if (isDigit(_text.charCodeAt(_index))) { | 170 if (TokenizerHelpers.isDigit(_text.charCodeAt(_index))) { |
| 171 _index++; | 171 _index++; |
| 172 } else { | 172 } else { |
| 173 return; | 173 return; |
| 174 } | 174 } |
| 175 } | 175 } |
| 176 } | 176 } |
| 177 | 177 |
| 178 static int _hexDigit(int c) { | 178 static int _hexDigit(int c) { |
| 179 if(c >= 48/*0*/ && c <= 57/*9*/) { | 179 if(c >= 48/*0*/ && c <= 57/*9*/) { |
| 180 return c - 48; | 180 return c - 48; |
| (...skipping 42 matching lines...) | |
| 223 return new LiteralToken(TokenKind.HEX_INTEGER, _source, _startIndex, | 223 return new LiteralToken(TokenKind.HEX_INTEGER, _source, _startIndex, |
| 224 _index, value); | 224 _index, value); |
| 225 } | 225 } |
| 226 | 226 |
| 227 Token finishNumber() { | 227 Token finishNumber() { |
| 228 eatDigits(); | 228 eatDigits(); |
| 229 | 229 |
| 230 if (_peekChar() == 46/*.*/) { | 230 if (_peekChar() == 46/*.*/) { |
| 231 // Handle the case of 1.toString(). | 231 // Handle the case of 1.toString(). |
| 232 _nextChar(); | 232 _nextChar(); |
| 233 if (isDigit(_peekChar())) { | 233 if (TokenizerHelpers.isDigit(_peekChar())) { |
| 234 eatDigits(); | 234 eatDigits(); |
| 235 return finishNumberExtra(TokenKind.DOUBLE); | 235 return finishNumberExtra(TokenKind.DOUBLE); |
| 236 } else { | 236 } else { |
| 237 _index--; | 237 _index--; |
| 238 } | 238 } |
| 239 } | 239 } |
| 240 | 240 |
| 241 return finishNumberExtra(TokenKind.INTEGER); | 241 return finishNumberExtra(TokenKind.INTEGER); |
| 242 } | 242 } |
| 243 | 243 |
| 244 Token finishNumberExtra(int kind) { | 244 Token finishNumberExtra(int kind) { |
| 245 if (_maybeEatChar(101/*e*/) || _maybeEatChar(69/*E*/)) { | 245 if (_maybeEatChar(101/*e*/) || _maybeEatChar(69/*E*/)) { |
| 246 kind = TokenKind.DOUBLE; | 246 kind = TokenKind.DOUBLE; |
| 247 _maybeEatChar(45/*-*/); | 247 _maybeEatChar(45/*-*/); |
| 248 _maybeEatChar(43/*+*/); | 248 _maybeEatChar(43/*+*/); |
| 249 eatDigits(); | 249 eatDigits(); |
| 250 } | 250 } |
| 251 if (_peekChar() != 0 && isIdentifierStart(_peekChar())) { | 251 if (_peekChar() != 0 && TokenizerHelpers.isIdentifierStart(_peekChar())) { |
| 252 _nextChar(); | 252 _nextChar(); |
| 253 return _errorToken("illegal character in number"); | 253 return _errorToken("illegal character in number"); |
| 254 } | 254 } |
| 255 | 255 |
| 256 return _finishToken(kind); | 256 return _finishToken(kind); |
| 257 } | 257 } |
| 258 | 258 |
| 259 Token _makeStringToken(List<int> buf, bool isPart) { | 259 Token _makeStringToken(List<int> buf, bool isPart) { |
| 260 final s = new String.fromCharCodes(buf); | 260 final s = new String.fromCharCodes(buf); |
| 261 final kind = isPart ? TokenKind.STRING_PART : TokenKind.STRING; | 261 final kind = isPart ? TokenKind.STRING_PART : TokenKind.STRING; |
| (...skipping 174 matching lines...) | |
| 436 return hexValue; | 436 return hexValue; |
| 437 } else if (hexValue <= 0x10FFFF){ | 437 } else if (hexValue <= 0x10FFFF){ |
| 438 world.fatal('unicode values greater than 2 bytes not implemented yet'); | 438 world.fatal('unicode values greater than 2 bytes not implemented yet'); |
| 439 return -1; | 439 return -1; |
| 440 } else { | 440 } else { |
| 441 return -1; | 441 return -1; |
| 442 } | 442 } |
| 443 } | 443 } |
| 444 | 444 |
| 445 Token finishDot() { | 445 Token finishDot() { |
| 446 if (isDigit(_peekChar())) { | 446 if (TokenizerHelpers.isDigit(_peekChar())) { |
| 447 eatDigits(); | 447 eatDigits(); |
| 448 return finishNumberExtra(TokenKind.DOUBLE); | 448 return finishNumberExtra(TokenKind.DOUBLE); |
| 449 } else { | 449 } else { |
| 450 return _finishToken(TokenKind.DOT); | 450 return _finishToken(TokenKind.DOT); |
| 451 } | 451 } |
| 452 } | 452 } |
| 453 | 453 |
| 454 Token finishIdentifier(int ch) { | 454 Token finishIdentifier(int ch) { |
| 455 if (_interpStack != null && _interpStack.depth == -1) { | 455 if (_interpStack != null && _interpStack.depth == -1) { |
| 456 _interpStack.depth = 0; | 456 _interpStack.depth = 0; |
| 457 if (ch == 36/*$*/) { | 457 if (ch == 36/*$*/) { |
| 458 return _errorToken( | 458 return _errorToken( |
| 459 @"illegal character after $ in string interpolation"); | 459 @"illegal character after $ in string interpolation"); |
| 460 } | 460 } |
| 461 while (_index < _text.length) { | 461 while (_index < _text.length) { |
| 462 if (!isInterpIdentifierPart(_text.charCodeAt(_index++))) { | 462 if (!TokenizerHelpers.isInterpIdentifierPart(_text.charCodeAt(_index++))) { |
| 463 _index--; | 463 _index--; |
| 464 break; | 464 break; |
| 465 } | 465 } |
| 466 } | 466 } |
| 467 } else { | 467 } else { |
| 468 while (_index < _text.length) { | 468 while (_index < _text.length) { |
| 469 if (!isIdentifierPart(_text.charCodeAt(_index++))) { | 469 if (!TokenizerHelpers.isIdentifierPart(_text.charCodeAt(_index++))) { |
| 470 _index--; | 470 _index--; |
| 471 break; | 471 break; |
| 472 } | 472 } |
| 473 } | 473 } |
| 474 } | 474 } |
| 475 int kind = getIdentifierKind(); | 475 int kind = getIdentifierKind(); |
| 476 if (kind == TokenKind.IDENTIFIER) { | 476 if (kind == TokenKind.IDENTIFIER) { |
| 477 return _finishToken(TokenKind.IDENTIFIER); | 477 return _finishToken(TokenKind.IDENTIFIER); |
| 478 } else { | 478 } else { |
| 479 return _finishToken(kind); | 479 return _finishToken(kind); |
| 480 } | 480 } |
| 481 } | 481 } |
| 482 } | 482 } |
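
Aside from the copyright-year bump, the substantive change in this diff is that the character-class helpers (isDigit, isIdentifierStart, isIdentifierPart, isInterpIdentifierPart) are now called as static methods qualified with TokenizerHelpers rather than unqualified. A minimal sketch of that calling pattern, written in current Dart syntax with illustrative helper bodies (assumptions only; the real TokenizerHelpers in the Dart tree may be defined differently):

// Sketch only: an illustrative TokenizerHelpers with the static helpers
// that the diff above qualifies explicitly. Method bodies are assumptions,
// not copied from the Dart repository.
class TokenizerHelpers {
  // ASCII '0'..'9'.
  static bool isDigit(int c) => c >= 48 /*0*/ && c <= 57 /*9*/;

  // ASCII letter, '_' or '$'.
  static bool isIdentifierStart(int c) =>
      (c >= 97 /*a*/ && c <= 122 /*z*/) ||
      (c >= 65 /*A*/ && c <= 90 /*Z*/) ||
      c == 95 /*_*/ || c == 36 /*$*/;

  // Identifier continuation: start characters plus digits.
  static bool isIdentifierPart(int c) =>
      isIdentifierStart(c) || isDigit(c);
}

void main() {
  const source = 'x1';
  // Callers qualify every helper with the class name, as in the NEW column.
  print(TokenizerHelpers.isIdentifierStart(source.codeUnitAt(0))); // true
  print(TokenizerHelpers.isDigit(source.codeUnitAt(1)));           // true
}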