| OLD | NEW | 
|---|---|
| 1 // Copyright (c) 2016, the Dart project authors.  Please see the AUTHORS file | 1 // Copyright (c) 2016, the Dart project authors.  Please see the AUTHORS file | 
| 2 // for details. All rights reserved. Use of this source code is governed by a | 2 // for details. All rights reserved. Use of this source code is governed by a | 
| 3 // BSD-style license that can be found in the LICENSE file. | 3 // BSD-style license that can be found in the LICENSE file. | 
| 4 | 4 | 
| 5 library fasta.analyzer.token_utils; | 5 library fasta.analyzer.token_utils; | 
| 6 | 6 | 
| 7 import 'package:front_end/src/fasta/parser/error_kind.dart' show ErrorKind; | 7 import 'package:front_end/src/fasta/parser/error_kind.dart' show ErrorKind; | 
| 8 | 8 | 
| 9 import 'package:front_end/src/fasta/scanner/error_token.dart' show ErrorToken; | 9 import 'package:front_end/src/fasta/scanner/error_token.dart' show ErrorToken; | 
| 10 | 10 | 
| 11 import 'package:front_end/src/fasta/scanner/keyword.dart' show Keyword; | 11 import 'package:front_end/src/fasta/scanner/keyword.dart' show Keyword; | 
| 12 | 12 | 
| 13 import 'package:front_end/src/fasta/scanner/precedence.dart'; | 13 import 'package:front_end/src/fasta/scanner/precedence.dart'; | 
| 14 | 14 | 
| 15 import 'package:front_end/src/fasta/scanner/token.dart' | 15 import 'package:front_end/src/fasta/scanner/token.dart' | 
| 16     show BeginGroupToken, KeywordToken, StringToken, SymbolToken, Token; | 16     show | 
|  | 17         BeginGroupToken, | 
|  | 18         CommentToken, | 
|  | 19         DartDocToken, | 
|  | 20         KeywordToken, | 
|  | 21         StringToken, | 
|  | 22         SymbolToken, | 
|  | 23         Token; | 
| 17 | 24 | 
| 18 import 'package:front_end/src/fasta/scanner/token_constants.dart'; | 25 import 'package:front_end/src/fasta/scanner/token_constants.dart'; | 
| 19 | 26 | 
| 20 import 'package:front_end/src/scanner/token.dart' as analyzer | 27 import 'package:front_end/src/scanner/token.dart' as analyzer | 
| 21     show | 28     show | 
| 22         BeginToken, | 29         BeginToken, | 
| 23         BeginTokenWithComment, | 30         BeginTokenWithComment, | 
| 24         CommentToken, | 31         CommentToken, | 
| 25         Keyword, | 32         Keyword, | 
| 26         KeywordToken, | 33         KeywordToken, | 
| 27         KeywordTokenWithComment, | 34         KeywordTokenWithComment, | 
| 28         StringToken, | 35         StringToken, | 
| 29         StringTokenWithComment, | 36         StringTokenWithComment, | 
| 30         Token, | 37         Token, | 
| 31         TokenWithComment; | 38         TokenWithComment; | 
| 32 | 39 | 
| 33 import 'package:front_end/src/scanner/errors.dart' as analyzer show ScannerErrorCode; | 40 import 'package:front_end/src/scanner/errors.dart' as analyzer | 
|  | 41     show ScannerErrorCode; | 
| 34 | 42 | 
| 35 import 'package:analyzer/dart/ast/token.dart' show TokenType; | 43 import 'package:analyzer/dart/ast/token.dart' show TokenType; | 
| 36 | 44 | 
| 37 import 'package:front_end/src/fasta/errors.dart' show internalError; | 45 import 'package:front_end/src/fasta/errors.dart' show internalError; | 
| 38 | 46 | 
| 39 /// Class capable of converting a stream of Fasta tokens to a stream of analyzer | 47 /// Class capable of converting a stream of Fasta tokens to a stream of analyzer | 
| 40 /// tokens. | 48 /// tokens. | 
| 41 /// | 49 /// | 
| 42 /// This is a class rather than an ordinary method so that it can be subclassed | 50 /// This is a class rather than an ordinary method so that it can be subclassed | 
| 43 /// in tests. | 51 /// in tests. | 
| (...skipping 36 matching lines...) |
| 80     _analyzerTokenTail = _analyzerTokenHead; | 88     _analyzerTokenTail = _analyzerTokenHead; | 
| 81     _beginTokenStack = [null]; | 89     _beginTokenStack = [null]; | 
| 82     _endTokenStack = <Token>[null]; | 90     _endTokenStack = <Token>[null]; | 
| 83 | 91 | 
| 84     while (true) { | 92     while (true) { | 
| 85       if (token.info.kind == BAD_INPUT_TOKEN) { | 93       if (token.info.kind == BAD_INPUT_TOKEN) { | 
| 86         ErrorToken errorToken = token; | 94         ErrorToken errorToken = token; | 
| 87         _translateErrorToken(errorToken); | 95         _translateErrorToken(errorToken); | 
| 88       } else { | 96       } else { | 
| 89         var translatedToken = translateToken( | 97         var translatedToken = translateToken( | 
| 90             token, translateCommentTokens(token.precedingComments)); | 98             token, translateCommentTokens(token.precedingCommentTokens)); | 
| 91         _matchGroups(token, translatedToken); | 99         _matchGroups(token, translatedToken); | 
| 92         translatedToken.setNext(translatedToken); | 100         translatedToken.setNext(translatedToken); | 
| 93         _analyzerTokenTail.setNext(translatedToken); | 101         _analyzerTokenTail.setNext(translatedToken); | 
| 94         translatedToken.previous = _analyzerTokenTail; | 102         translatedToken.previous = _analyzerTokenTail; | 
| 95         _analyzerTokenTail = translatedToken; | 103         _analyzerTokenTail = translatedToken; | 
| 96       } | 104       } | 
| 97       if (token.isEof) { | 105       if (token.isEof) { | 
| 98         return _analyzerTokenHead.next; | 106         return _analyzerTokenHead.next; | 
| 99       } | 107       } | 
| 100       token = token.next; | 108       token = token.next; | 
| (...skipping 184 matching lines...) |
| 285       tail.next = fromAnalyzerToken(token); | 293       tail.next = fromAnalyzerToken(token); | 
| 286       tail.next.previousToken = tail; | 294       tail.next.previousToken = tail; | 
| 287       tail = tail.next; | 295       tail = tail.next; | 
| 288       token = token.next; | 296       token = token.next; | 
| 289     } | 297     } | 
| 290     return head; | 298     return head; | 
| 291   } | 299   } | 
| 292 | 300 | 
| 293   analyzer.Token translateAndAppend(analyzer.Token analyzerToken) { | 301   analyzer.Token translateAndAppend(analyzer.Token analyzerToken) { | 
| 294     var token = fromAnalyzerToken(analyzerToken); | 302     var token = fromAnalyzerToken(analyzerToken); | 
| 295     token.precedingComments = | 303     token.precedingCommentTokens = | 
| 296         translateComments(analyzerToken.precedingComments); | 304         translateComments(analyzerToken.precedingComments); | 
| 297     tokenTail.next = token; | 305     tokenTail.next = token; | 
| 298     tokenTail.next.previousToken = tokenTail; | 306     tokenTail.next.previousToken = tokenTail; | 
| 299     tokenTail = token; | 307     tokenTail = token; | 
| 300     matchGroups(analyzerToken, token); | 308     matchGroups(analyzerToken, token); | 
| 301     return analyzerToken.next; | 309     return analyzerToken.next; | 
| 302   } | 310   } | 
| 303 | 311 | 
| 304   while (true) { | 312   while (true) { | 
| 305     // TODO(paulberry): join up begingroup/endgroup. | 313     // TODO(paulberry): join up begingroup/endgroup. | 
| 306     if (analyzerToken.type == TokenType.EOF) { | 314     if (analyzerToken.type == TokenType.EOF) { | 
| 307       tokenTail.next = new SymbolToken(EOF_INFO, analyzerToken.offset); | 315       tokenTail.next = new SymbolToken(EOF_INFO, analyzerToken.offset); | 
| 308       tokenTail.next.previousToken = tokenTail; | 316       tokenTail.next.previousToken = tokenTail; | 
| 309       tokenTail.next.precedingComments = | 317       tokenTail.next.precedingCommentTokens = | 
| 310           translateComments(analyzerToken.precedingComments); | 318           translateComments(analyzerToken.precedingComments); | 
| 311       return tokenHead.next; | 319       return tokenHead.next; | 
| 312     } | 320     } | 
| 313     analyzerToken = translateAndAppend(analyzerToken); | 321     analyzerToken = translateAndAppend(analyzerToken); | 
| 314   } | 322   } | 
| 315 } | 323 } | 
| 316 | 324 | 
| 317 /// Converts a single analyzer token into a Fasta token. | 325 /// Converts a single analyzer token into a Fasta token. | 
| 318 Token fromAnalyzerToken(analyzer.Token token) { | 326 Token fromAnalyzerToken(analyzer.Token token) { | 
| 319   Token beginGroup(PrecedenceInfo info) => | 327   Token beginGroup(PrecedenceInfo info) => | 
| (...skipping 23 matching lines...) |
| 343       return string(INT_INFO); | 351       return string(INT_INFO); | 
| 344     case TokenType.KEYWORD: | 352     case TokenType.KEYWORD: | 
| 345       var keyword = Keyword.keywords[token.lexeme]; | 353       var keyword = Keyword.keywords[token.lexeme]; | 
| 346       if (keyword != null) { | 354       if (keyword != null) { | 
| 347         return new KeywordToken(keyword, token.offset); | 355         return new KeywordToken(keyword, token.offset); | 
| 348       } else { | 356       } else { | 
| 349         return internalError("Unrecognized keyword: '${token.lexeme}'."); | 357         return internalError("Unrecognized keyword: '${token.lexeme}'."); | 
| 350       } | 358       } | 
| 351       break; | 359       break; | 
| 352     case TokenType.MULTI_LINE_COMMENT: | 360     case TokenType.MULTI_LINE_COMMENT: | 
| 353       return string(MULTI_LINE_COMMENT_INFO); | 361       if (token.lexeme.startsWith('/**')) { | 
|  | 362         return new DartDocToken.fromSubstring( | 
|  | 363             MULTI_LINE_COMMENT_INFO, token.lexeme, 0, token.lexeme.length, 0); | 
|  | 364       } | 
|  | 365       return new CommentToken.fromSubstring( | 
|  | 366           MULTI_LINE_COMMENT_INFO, token.lexeme, 0, token.lexeme.length, 0); | 
| 354     case TokenType.SCRIPT_TAG: | 367     case TokenType.SCRIPT_TAG: | 
| 355       return string(SCRIPT_INFO); | 368       return string(SCRIPT_INFO); | 
| 356     case TokenType.SINGLE_LINE_COMMENT: | 369     case TokenType.SINGLE_LINE_COMMENT: | 
| 357       return string(SINGLE_LINE_COMMENT_INFO); | 370       if (token.lexeme.startsWith('///')) { | 
|  | 371         return new DartDocToken.fromSubstring( | 
|  | 372             SINGLE_LINE_COMMENT_INFO, token.lexeme, 0, token.lexeme.length, 0); | 
|  | 373       } | 
|  | 374       return new CommentToken.fromSubstring( | 
|  | 375           SINGLE_LINE_COMMENT_INFO, token.lexeme, 0, token.lexeme.length, 0); | 
| 358     case TokenType.STRING: | 376     case TokenType.STRING: | 
| 359       return string(STRING_INFO); | 377       return string(STRING_INFO); | 
| 360     case TokenType.AMPERSAND: | 378     case TokenType.AMPERSAND: | 
| 361       return symbol(AMPERSAND_INFO); | 379       return symbol(AMPERSAND_INFO); | 
| 362     case TokenType.AMPERSAND_AMPERSAND: | 380     case TokenType.AMPERSAND_AMPERSAND: | 
| 363       return symbol(AMPERSAND_AMPERSAND_INFO); | 381       return symbol(AMPERSAND_AMPERSAND_INFO); | 
| 364     // case TokenType.AMPERSAND_AMPERSAND_EQ | 382     // case TokenType.AMPERSAND_AMPERSAND_EQ | 
| 365     case TokenType.AMPERSAND_EQ: | 383     case TokenType.AMPERSAND_EQ: | 
| 366       return symbol(AMPERSAND_EQ_INFO); | 384       return symbol(AMPERSAND_EQ_INFO); | 
| 367     case TokenType.AT: | 385     case TokenType.AT: | 
| (...skipping 132 matching lines...) |
| 500     if (token.info.kind != BAD_INPUT_TOKEN) return false; | 518     if (token.info.kind != BAD_INPUT_TOKEN) return false; | 
| 501     // Otherwise keep looking. | 519     // Otherwise keep looking. | 
| 502   } | 520   } | 
| 503 } | 521 } | 
| 504 | 522 | 
| 505 analyzer.Token toAnalyzerToken(Token token, | 523 analyzer.Token toAnalyzerToken(Token token, | 
| 506     [analyzer.CommentToken commentToken]) { | 524     [analyzer.CommentToken commentToken]) { | 
| 507   if (token == null) return null; | 525   if (token == null) return null; | 
| 508   analyzer.Token makeStringToken(TokenType tokenType) { | 526   analyzer.Token makeStringToken(TokenType tokenType) { | 
| 509     if (commentToken == null) { | 527     if (commentToken == null) { | 
| 510       return new analyzer.StringToken(tokenType, token.lexeme, token.charOffset); | 528       return new analyzer.StringToken( | 
|  | 529           tokenType, token.lexeme, token.charOffset); | 
| 511     } else { | 530     } else { | 
| 512       return new analyzer.StringTokenWithComment( | 531       return new analyzer.StringTokenWithComment( | 
| 513           tokenType, token.lexeme, token.charOffset, commentToken); | 532           tokenType, token.lexeme, token.charOffset, commentToken); | 
| 514     } | 533     } | 
| 515   } | 534   } | 
| 516 | 535 | 
| 517   analyzer.Token makeBeginToken(TokenType tokenType) { | 536   analyzer.Token makeBeginToken(TokenType tokenType) { | 
| 518     if (commentToken == null) { | 537     if (commentToken == null) { | 
| 519       return new analyzer.BeginToken(tokenType, token.charOffset); | 538       return new analyzer.BeginToken(tokenType, token.charOffset); | 
| 520     } else { | 539     } else { | 
| (...skipping 262 matching lines...) |
| 783     case PERIOD_PERIOD_PERIOD_TOKEN: | 802     case PERIOD_PERIOD_PERIOD_TOKEN: | 
| 784       return TokenType.PERIOD_PERIOD_PERIOD; | 803       return TokenType.PERIOD_PERIOD_PERIOD; | 
| 785     // case GENERIC_METHOD_TYPE_LIST_TOKEN: | 804     // case GENERIC_METHOD_TYPE_LIST_TOKEN: | 
| 786     //   return TokenType.GENERIC_METHOD_TYPE_LIST; | 805     //   return TokenType.GENERIC_METHOD_TYPE_LIST; | 
| 787     // case GENERIC_METHOD_TYPE_ASSIGN_TOKEN: | 806     // case GENERIC_METHOD_TYPE_ASSIGN_TOKEN: | 
| 788     //   return TokenType.GENERIC_METHOD_TYPE_ASSIGN; | 807     //   return TokenType.GENERIC_METHOD_TYPE_ASSIGN; | 
| 789     default: | 808     default: | 
| 790       return internalError("Unhandled token ${token.info}"); | 809       return internalError("Unhandled token ${token.info}"); | 
| 791   } | 810   } | 
| 792 } | 811 } | 
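The substantive change in this CL is the comment handling in `fromAnalyzerToken`: multi-line and single-line comments that use doc-comment syntax now become `DartDocToken`s, while ordinary comments stay `CommentToken`s. A minimal standalone sketch of that classification rule follows; `isDartDocComment` is a hypothetical helper, since the CL performs the same `startsWith` checks inline before choosing a constructor.

```dart
// Sketch of the doc-comment test behind the DartDocToken/CommentToken
// split in fromAnalyzerToken. The helper name is hypothetical; the CL
// applies these startsWith checks inline.
bool isDartDocComment(String lexeme) {
  // `/** ... */` blocks and `/// ...` lines are documentation comments;
  // `/* ... */` and `// ...` are ordinary comments.
  return lexeme.startsWith('/**') || lexeme.startsWith('///');
}

void main() {
  for (var lexeme in ['/// doc', '/** doc */', '// note', '/* block */']) {
    var kind = isDartDocComment(lexeme) ? 'DartDocToken' : 'CommentToken';
    print('$lexeme -> $kind');
  }
}
```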
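Both conversion directions visible in the diff build their output list the same way: a sentinel head token, forward and backward links set on each append (`tokenTail.next = token; tokenTail.next.previousToken = tokenTail;`), and `head.next` returned as the real first token. A self-contained sketch of that pattern, using a hypothetical `Node` class in place of the real token types:

```dart
// Hypothetical stand-in for the fasta/analyzer token classes; only the
// doubly-linked list shape is being illustrated here.
class Node {
  final String lexeme;
  Node next;
  Node previous;
  Node(this.lexeme);
}

/// Appends each element behind a dummy head, linking both directions,
/// then returns head.next as the first real node, mirroring the
/// tokenHead/tokenTail bookkeeping in the conversion loops above.
Node buildChain(Iterable<String> lexemes) {
  var head = new Node('');
  var tail = head;
  for (var lexeme in lexemes) {
    tail.next = new Node(lexeme);
    tail.next.previous = tail;
    tail = tail.next;
  }
  return head.next;
}

void main() {
  for (var node = buildChain(['a', '+', 'b']);
      node != null;
      node = node.next) {
    print(node.lexeme); // prints: a, +, b
  }
}
```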
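The `_beginTokenStack`/`_endTokenStack` pair initialized at the top of the conversion loop supports the group matching done by `_matchGroups`, whose body sits in a collapsed hunk. Assuming it follows the usual stack discipline, and that begin tokens carry an `endToken` field as `analyzer.BeginToken` does, a rough sketch of the idea might look like this (the `Tok` class is hypothetical):

```dart
// Hypothetical token class; analyzer.BeginToken exposes a similar
// endToken field that group matching is expected to populate.
class Tok {
  final String lexeme;
  Tok endToken;
  Tok(this.lexeme);
}

/// Pairs each opening bracket with its closing bracket using a stack,
/// the same discipline the begin/end token stacks above suggest.
void matchGroups(List<Tok> tokens) {
  var stack = <Tok>[];
  const opens = const {'(': ')', '[': ']', '{': '}'};
  for (var token in tokens) {
    if (opens.containsKey(token.lexeme)) {
      stack.add(token);
    } else if (stack.isNotEmpty &&
        opens[stack.last.lexeme] == token.lexeme) {
      stack.removeLast().endToken = token;
    }
  }
}

void main() {
  var tokens = ['(', '[', ']', ')'].map((s) => new Tok(s)).toList();
  matchGroups(tokens);
  print(tokens.first.endToken.lexeme); // prints: )
}
```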