Chromium Code Reviews

Unified diff: pkg/front_end/lib/src/fasta/scanner/token.dart

Issue 2763833002: fasta.CommentToken implement analyzer.CommentToken (Closed)
Patch Set: merge (created 3 years, 9 months ago)
// Copyright (c) 2011, the Dart project authors. Please see the AUTHORS file
// for details. All rights reserved. Use of this source code is governed by a
// BSD-style license that can be found in the LICENSE file.

library fasta.scanner.token;

import '../../scanner/token.dart' as analyzer;

import 'keyword.dart' show Keyword;

import 'precedence.dart' show BAD_INPUT_INFO, EOF_INFO, PrecedenceInfo;

import 'token_constants.dart' show IDENTIFIER_TOKEN;

import 'string_canonicalizer.dart';

/**
 * A token that doubles as a linked list.
 */
-abstract class Token implements analyzer.Token {
+abstract class Token implements analyzer.TokenWithComment {
  /**
   * The character offset of the start of this token within the source text.
   */
  int charOffset;

  Token(this.charOffset);

  /**
   * The next token in the token stream.
   */
  Token next;

  /**
   * The previous token in the token stream.
   *
   * Deprecated :: This exists for compatibility with the Analyzer token stream
   * and will be removed at some future date.
   */
  @deprecated
  Token previousToken;

  /**
   * Return the first comment in the list of comments that precede this token,
   * or `null` if there are no comments preceding this token. Additional
   * comments can be reached by following the token stream using [next] until
   * `null` is returned.
   */
-  Token precedingComments;
+  CommentToken precedingCommentTokens;
+
+  @override
+  analyzer.CommentToken get precedingComments => precedingCommentTokens;
+
+  @override
+  void set precedingComments(analyzer.CommentToken token) {
+    precedingCommentTokens = token;
+  }

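The new getter and setter above expose the fasta comment field through the analyzer interface. A minimal sketch (reviewer illustration, not part of this patch) of walking the comment chain that the doc comment describes:

    // Sketch: print every comment that precedes `token` (a fasta Token).
    void printPrecedingComments(Token token) {
      CommentToken comment = token.precedingCommentTokens;
      while (comment != null) {
        print(comment.lexeme);
        comment = comment.next; // implicit downcast, as in applyDelta below
      }
    }
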
  /**
   * The precedence info for this token. [info] determines the kind and the
   * precedence level of this token.
   *
   * Defined as getter to save a field in the [KeywordToken] subclass.
   */
  PrecedenceInfo get info;

  /**
(...skipping 98 matching lines...)
  analyzer.Token get previous => previousToken;

  @override
  set previous(analyzer.Token newToken) {
    previousToken = newToken as Token;
  }

  @override
  void applyDelta(int delta) {
    charOffset += delta;
-    Token token = precedingComments;
+    CommentToken token = precedingComments;
    while (token != null) {
      token.applyDelta(delta);
      token = token.next;
    }
  }

  @override
  analyzer.Token copy() {
    return copyWithoutComments()
-      ..precedingComments = copyComments(precedingComments) as Token;
+      ..precedingComments = copyComments(precedingComments);
  }

  @override
  analyzer.Token copyComments(analyzer.Token token) {
    if (token == null) {
      return null;
    }
    Token head = token.copy();
    Token tail = head;
    token = token.next;
(...skipping 107 matching lines...)

  @override
  Object value() => keyword;
}

/**
 * A String-valued token. Represents identifiers, string literals,
 * number literals, comments, and error tokens, using the corresponding
 * precedence info.
 */
-class StringToken extends Token {
+class StringToken extends Token implements analyzer.StringToken {
  /**
   * The length threshold above which substring tokens are computed lazily.
   *
   * For string tokens that are substrings of the program source, the actual
   * substring extraction is performed lazily. This is beneficial because
   * not all scanned code is actually used. For unused parts, the substrings
   * are never computed and allocated.
   */
  static const int LAZY_THRESHOLD = 4;

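LAZY_THRESHOLD controls whether a token's text is extracted eagerly or deferred. A simplified sketch of that decision, assuming nothing beyond the constant above; the _Lazy holder is a hypothetical stand-in for the real LazySubstring, whose constructors are elided in this diff:

    // Hypothetical illustration of the eager-versus-lazy choice.
    class _Lazy {
      final String data;
      final int start, end;
      _Lazy(this.data, this.start, this.end);
      String compute() => data.substring(start, end);
    }

    Object eagerOrLazy(String source, int start, int end) =>
        (end - start) <= StringToken.LAZY_THRESHOLD
            ? source.substring(start, end) // short: extract now
            : new _Lazy(source, start, end); // long: defer the substring
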
(...skipping 81 matching lines...)
    return canonicalizer.canonicalize(s, start, end, false);
  }

  static String decodeUtf8(List<int> data, int start, int end, bool asciiOnly) {
    return canonicalizer.canonicalize(data, start, end, asciiOnly);
  }

  @override
  Token copyWithoutComments() =>
      new StringToken._(info, valueOrLazySubstring, charOffset);
+
+  @override
+  String value() => lexeme;
+}
+
+class CommentToken extends StringToken implements analyzer.CommentToken {
+  /**
+   * Creates a lazy comment token. If [canonicalize] is true, the string
+   * is canonicalized before the token is created.
+   */
+  CommentToken.fromSubstring(
+      PrecedenceInfo info, String data, int start, int end, int charOffset,
+      {bool canonicalize: false})
+      : super.fromSubstring(info, data, start, end, charOffset,
+            canonicalize: canonicalize);
+
+  /**
+   * Creates a lazy string token. If [asciiOnly] is false, the byte array
+   * is passed through a UTF-8 decoder.
+   */
+  CommentToken.fromUtf8Bytes(PrecedenceInfo info, List<int> data, int start,
+      int end, bool asciiOnly, int charOffset)
+      : super.fromUtf8Bytes(info, data, start, end, asciiOnly, charOffset);
+
+  CommentToken._(PrecedenceInfo info, valueOrLazySubstring, int charOffset)
+      : super._(info, valueOrLazySubstring, charOffset);
+
+  @override
+  CommentToken copy() =>
+      new CommentToken._(info, valueOrLazySubstring, charOffset);
+
+  @override
+  analyzer.TokenWithComment get parent {
+    Token token = next;
+    while (token is CommentToken) {
+      token = token.next;
+    }
+    return token;
+  }
+
+  @override
+  void set parent(analyzer.TokenWithComment ignored) {
+    throw 'unsupported operation';
+  }
+
+  @override
+  void remove() {
+    // TODO: implement remove
+    throw 'not implemented yet';
+  }
+}
+
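For reference, a construction sketch for the class added above (not part of the patch; COMMENT_INFO is assumed to be the comment PrecedenceInfo from precedence.dart):

    // Sketch: lazily scan a block comment lexeme out of a source string.
    CommentToken makeComment() {
      String source = '/* example */ main() {}';
      return new CommentToken.fromSubstring(COMMENT_INFO, source, 0, 13, 0,
          canonicalize: true);
    }
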
+class DartDocToken extends CommentToken
+    implements analyzer.DocumentationCommentToken {
+  /**
+   * The references embedded within the documentation comment.
+   * This list will be empty unless this is a documentation comment that has
+   * references embedded within it.
+   */
+  final List<Token> references = <Token>[];
+
+  /**
+   * Creates a lazy comment token. If [canonicalize] is true, the string
+   * is canonicalized before the token is created.
+   */
+  DartDocToken.fromSubstring(
+      PrecedenceInfo info, String data, int start, int end, int charOffset,
+      {bool canonicalize: false})
+      : super.fromSubstring(info, data, start, end, charOffset,
+            canonicalize: canonicalize);
+
+  /**
+   * Creates a lazy string token. If [asciiOnly] is false, the byte array
+   * is passed through a UTF-8 decoder.
+   */
+  DartDocToken.fromUtf8Bytes(PrecedenceInfo info, List<int> data, int start,
+      int end, bool asciiOnly, int charOffset)
+      : super.fromUtf8Bytes(info, data, start, end, asciiOnly, charOffset);
+
+  DartDocToken._(PrecedenceInfo info, valueOrLazySubstring, int charOffset)
+      : super._(info, valueOrLazySubstring, charOffset);
+
+  @override
+  DartDocToken copy() {
+    DartDocToken copy =
+        new DartDocToken._(info, valueOrLazySubstring, charOffset);
+    references.forEach((ref) => copy.references.add(ref.copy()));
+    return copy;
+  }
}

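A short sketch (not part of the patch) of the deep-copy behavior implemented by DartDocToken.copy() above: the copy gets its own references list, populated with copied tokens.

    // Sketch: cloning a doc comment token keeps the reference lists independent.
    DartDocToken cloneDoc(DartDocToken doc) {
      DartDocToken copy = doc.copy();
      assert(copy.references.length == doc.references.length);
      assert(!identical(copy.references, doc.references));
      return copy;
    }
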
/**
 * This class represents the necessary information to compute a substring
 * lazily. The substring can either originate from a string or from
 * a [:List<int>:] of UTF-8 bytes.
 */
abstract class LazySubstring {
  /** The original data, either a string or a List<int> */
  get data;
(...skipping 78 matching lines...)
      identical(value, "<=") ||
      identical(value, "<") ||
      identical(value, "&") ||
      identical(value, "^") ||
      identical(value, "|");
}

bool isTernaryOperator(String value) => identical(value, "[]=");

bool isMinusOperator(String value) => identical(value, "-");
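
A small usage sketch for the two fully visible predicates above; the truncated function that ends just before them is elided by this diff and is not used here:

    // Sketch: classify an operator lexeme with the helpers defined above.
    String describeOperator(String lexeme) {
      if (isTernaryOperator(lexeme)) return '"[]=" takes three operands';
      if (isMinusOperator(lexeme)) return 'minus, unary or binary';
      return 'not classified by these two helpers';
    }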