// Copyright (c) 2014, the Dart project authors. Please see the AUTHORS file
// for details. All rights reserved. Use of this source code is governed by a
// BSD-style license that can be found in the LICENSE file.

library engine.incremental_scanner_test;

import 'package:analyzer/src/generated/engine.dart';
import 'package:analyzer/src/generated/incremental_scanner.dart';
import 'package:analyzer/src/generated/scanner.dart';
import 'package:analyzer/src/generated/source.dart';
import 'package:unittest/unittest.dart';

import '../reflective_tests.dart';
import '../utils.dart';
import 'test_support.dart';

main() {
  initializeTestEnvironment();
  runReflectiveTests(IncrementalScannerTest);
}

@reflectiveTest
class IncrementalScannerTest extends EngineTestCase {
  /**
   * The first token from the token stream resulting from parsing the original
   * source, or `null` if [scan] has not been invoked.
   */
  Token _originalTokens;

  /**
   * The scanner used to perform incremental scanning, or `null` if [scan] has
   * not been invoked.
   */
  IncrementalScanner _incrementalScanner;

  /**
   * The first token from the token stream resulting from performing an
   * incremental scan, or `null` if [scan] has not been invoked.
   */
  Token _incrementalTokens;

  void fail_insert_beginning() {
    // This is currently reporting the changed range as being from 0 to 5, but
    // that would force us to re-parse both classes, which is clearly
    // sub-optimal.
    //
    // "class B {}"
    // "class A {} class B {}"
    _scan("", "", "class A {} ", "class B {}");
    _assertTokens(-1, 4, ["class", "A", "{", "}", "class", "B", "{", "}"]);
    expect(_incrementalScanner.hasNonWhitespaceChange, isTrue);
  }

  void fail_insert_comment_afterIdentifier() {
    // "a + b"
    // "a /* TODO */ + b"
    _scan("a", "", " /* TODO */", " + b");
    _assertTokens(0, 1, ["a", "+", "b"]);
    _assertComments(1, ["/* TODO */"]);
    expect(_incrementalScanner.hasNonWhitespaceChange, isFalse);
  }

  void fail_insert_comment_beforeIdentifier() {
    // "a + b"
    // "a + /* TODO */ b"
    _scan("a + ", "", "/* TODO */ ", "b");
    _assertTokens(1, 2, ["a", "+", "b"]);
    _assertComments(2, ["/* TODO */"]);
    expect(_incrementalScanner.hasNonWhitespaceChange, isFalse);
  }

  void fail_insert_inComment() {
    // "a /* TO */ b"
    // "a /* TODO */ b"
    _scan("a /* TO", "", "DO", " */ b");
    _assertTokens(0, 1, ["a", "b"]);
    _assertComments(1, ["/* TODO */"]);
    expect(_incrementalScanner.hasNonWhitespaceChange, isFalse);
  }

  void test_delete_identifier_beginning() {
    // "abs + b;"
    // "s + b;"
    _scan("", "ab", "", "s + b;");
    _assertTokens(-1, 1, ["s", "+", "b", ";"]);
    expect(_incrementalScanner.hasNonWhitespaceChange, isTrue);
  }

  void test_delete_identifier_end() {
    // "abs + b;"
    // "a + b;"
    _scan("a", "bs", "", " + b;");
    _assertTokens(-1, 1, ["a", "+", "b", ";"]);
    expect(_incrementalScanner.hasNonWhitespaceChange, isTrue);
  }

  void test_delete_identifier_middle() {
    // "abs + b;"
    // "as + b;"
    _scan("a", "b", "", "s + b;");
    _assertTokens(-1, 1, ["as", "+", "b", ";"]);
    expect(_incrementalScanner.hasNonWhitespaceChange, isTrue);
  }

  void test_delete_mergeTokens() {
    // "a + b + c;"
    // "ac;"
    _scan("a", " + b + ", "", "c;");
    _assertTokens(-1, 1, ["ac", ";"]);
    expect(_incrementalScanner.hasNonWhitespaceChange, isTrue);
  }

  void test_delete_whitespace() {
    // "a + b + c;"
    // "a+ b + c;"
    _scan("a", " ", "", "+ b + c;");
    _assertTokens(1, 2, ["a", "+", "b", "+", "c", ";"]);
    expect(_incrementalScanner.hasNonWhitespaceChange, isFalse);
  }

  void test_insert_convertOneFunctionToTwo_noOverlap() {
    // "f() {}"
    // "f() => 0; g() {}"
    _scan("f()", "", " => 0; g()", " {}");
    _assertTokens(
        2, 9, ["f", "(", ")", "=>", "0", ";", "g", "(", ")", "{", "}"]);
    expect(_incrementalScanner.hasNonWhitespaceChange, isTrue);
  }

  void test_insert_convertOneFunctionToTwo_overlap() {
    // "f() {}"
    // "f() {} g() {}"
    _scan("f() {", "", "} g() {", "}");
    _assertTokens(4, 10, ["f", "(", ")", "{", "}", "g", "(", ")", "{", "}"]);
    expect(_incrementalScanner.hasNonWhitespaceChange, isTrue);
  }

  void test_insert_end() {
    // "class A {}"
    // "class A {} class B {}"
    _scan("class A {}", "", " class B {}", "");
    _assertTokens(3, 8, ["class", "A", "{", "}", "class", "B", "{", "}"]);
    expect(_incrementalScanner.hasNonWhitespaceChange, isTrue);
  }

  void test_insert_identifierAndPeriod() {
    // "a + b;"
    // "a + x.b;"
    _scan("a + ", "", "x.", "b;");
    _assertTokens(1, 4, ["a", "+", "x", ".", "b", ";"]);
  }

  void test_insert_inIdentifier_left_firstToken() {
    // "a + b;"
    // "xa + b;"
    _scan("", "", "x", "a + b;");
    _assertTokens(-1, 1, ["xa", "+", "b", ";"]);
    expect(_incrementalScanner.hasNonWhitespaceChange, isTrue);
  }

  void test_insert_inIdentifier_left_lastToken() {
    // "a + b"
    // "a + xb"
    _scan("a + ", "", "x", "b");
    _assertTokens(1, 3, ["a", "+", "xb"]);
    expect(_incrementalScanner.hasNonWhitespaceChange, isTrue);
  }

  void test_insert_inIdentifier_left_middleToken() {
    // "a + b;"
    // "a + xb;"
    _scan("a + ", "", "x", "b;");
    _assertTokens(1, 3, ["a", "+", "xb", ";"]);
    expect(_incrementalScanner.hasNonWhitespaceChange, isTrue);
  }

  void test_insert_inIdentifier_middle() {
    // "cat;"
    // "cart;"
    _scan("ca", "", "r", "t;");
    _assertTokens(-1, 1, ["cart", ";"]);
    expect(_incrementalScanner.hasNonWhitespaceChange, isTrue);
  }

  void test_insert_inIdentifier_right_firstToken() {
    // "a + b;"
    // "abs + b;"
    _scan("a", "", "bs", " + b;");
    _assertTokens(-1, 1, ["abs", "+", "b", ";"]);
    expect(_incrementalScanner.hasNonWhitespaceChange, isTrue);
  }

  void test_insert_inIdentifier_right_lastToken() {
    // "a + b"
    // "a + bc"
    _scan("a + b", "", "c", "");
    _assertTokens(1, 3, ["a", "+", "bc"]);
    expect(_incrementalScanner.hasNonWhitespaceChange, isTrue);
  }

  void test_insert_inIdentifier_right_middleToken() {
    // "a + b;"
    // "a + by;"
    _scan("a + b", "", "y", ";");
    _assertTokens(1, 3, ["a", "+", "by", ";"]);
    expect(_incrementalScanner.hasNonWhitespaceChange, isTrue);
  }

  void test_insert_newIdentifier_noSpaceBefore() {
    // "a; c;"
    // "a;b c;"
    _scan("a;", "", "b", " c;");
    _assertTokens(1, 3, ["a", ";", "b", "c", ";"]);
    expect(_incrementalScanner.hasNonWhitespaceChange, isTrue);
  }

  void test_insert_newIdentifier_spaceBefore() {
    // "a; c;"
    // "a; b c;"
    _scan("a; ", "", "b ", "c;");
    _assertTokens(1, 3, ["a", ";", "b", "c", ";"]);
    expect(_incrementalScanner.hasNonWhitespaceChange, isTrue);
  }

  void test_insert_period_afterIdentifier() {
    // "a + b;"
    // "a + b.;"
    _scan("a + b", "", ".", ";");
    _assertTokens(2, 4, ["a", "+", "b", ".", ";"]);
  }

  void test_insert_period_betweenIdentifiers_left() {
    // "a b;"
    // "a. b;"
    _scan("a", "", ".", " b;");
    _assertTokens(0, 2, ["a", ".", "b", ";"]);
    expect(_incrementalScanner.hasNonWhitespaceChange, isTrue);
  }

  void test_insert_period_betweenIdentifiers_middle() {
    // "a b;"
    // "a . b;"
    _scan("a ", "", ".", " b;");
    _assertTokens(0, 2, ["a", ".", "b", ";"]);
    expect(_incrementalScanner.hasNonWhitespaceChange, isTrue);
  }

  void test_insert_period_betweenIdentifiers_right() {
    // "a b;"
    // "a .b;"
    _scan("a ", "", ".", "b;");
    _assertTokens(0, 2, ["a", ".", "b", ";"]);
  }

  void test_insert_period_insideExistingIdentifier() {
    // "ab;"
    // "a.b;"
    _scan("a", "", ".", "b;");
    _assertTokens(-1, 3, ["a", ".", "b", ";"]);
    expect(_incrementalScanner.hasNonWhitespaceChange, isTrue);
  }

  void test_insert_periodAndIdentifier() {
    // "a + b;"
    // "a + b.x;"
    _scan("a + b", "", ".x", ";");
    _assertTokens(2, 5, ["a", "+", "b", ".", "x", ";"]);
  }

  void test_insert_splitIdentifier() {
    // "cob;"
    // "cow.b;"
    _scan("co", "", "w.", "b;");
    _assertTokens(-1, 3, ["cow", ".", "b", ";"]);
    expect(_incrementalScanner.hasNonWhitespaceChange, isTrue);
  }

  void test_insert_tokens_within_whitespace() {
    // "a  ;"
    // "a +b ;"
    _scan("a ", "", "+b", " ;");
    _assertTokens(0, 3, ["a", "+", "b", ";"]);
    expect(_incrementalScanner.hasNonWhitespaceChange, isTrue);
  }

  void test_insert_whitespace_beginning_beforeToken() {
    // "a + b;"
    // " a + b;"
    _scan("", "", " ", "a + b;");
    _assertTokens(0, 1, ["a", "+", "b", ";"]);
    expect(_incrementalScanner.hasNonWhitespaceChange, isFalse);
  }

  void test_insert_whitespace_betweenTokens() {
    // "a + b;"
    // "a  + b;"
    _scan("a ", "", " ", "+ b;");
    _assertTokens(1, 2, ["a", "+", "b", ";"]);
    expect(_incrementalScanner.hasNonWhitespaceChange, isFalse);
  }

  void test_insert_whitespace_end_afterToken() {
    // "a + b;"
    // "a + b; "
    _scan("a + b;", "", " ", "");
    _assertTokens(3, 4, ["a", "+", "b", ";"]);
    expect(_incrementalScanner.hasNonWhitespaceChange, isFalse);
  }

  void test_insert_whitespace_end_afterWhitespace() {
    // "a + b; "
    // "a + b;  "
    _scan("a + b; ", "", " ", "");
    _assertTokens(3, 4, ["a", "+", "b", ";"]);
    expect(_incrementalScanner.hasNonWhitespaceChange, isFalse);
  }

  void test_insert_whitespace_withMultipleComments() {
    // "//comment1", "//comment2", "a + b;"
    // "//comment1", "//comment2", "a  + b;"
    _scan(
        r'''
//comment1
//comment2
a''',
        "",
        " ",
        " + b;");
    _assertTokens(1, 2, ["a", "+", "b", ";"]);
    expect(_incrementalScanner.hasNonWhitespaceChange, isFalse);
  }

  void test_replace_identifier_beginning() {
    // "bell + b;"
    // "fell + b;"
    _scan("", "b", "f", "ell + b;");
    _assertTokens(-1, 1, ["fell", "+", "b", ";"]);
    expect(_incrementalScanner.hasNonWhitespaceChange, isTrue);
  }

  void test_replace_identifier_end() {
    // "bell + b;"
    // "belt + b;"
    _scan("bel", "l", "t", " + b;");
    _assertTokens(-1, 1, ["belt", "+", "b", ";"]);
    expect(_incrementalScanner.hasNonWhitespaceChange, isTrue);
  }

  void test_replace_identifier_middle() {
    // "first + b;"
    // "frost + b;"
    _scan("f", "ir", "ro", "st + b;");
    _assertTokens(-1, 1, ["frost", "+", "b", ";"]);
    expect(_incrementalScanner.hasNonWhitespaceChange, isTrue);
  }

  void test_replace_multiple_partialFirstAndLast() {
    // "aa + bb;"
    // "ab * ab;"
    _scan("a", "a + b", "b * a", "b;");
    _assertTokens(-1, 3, ["ab", "*", "ab", ";"]);
    expect(_incrementalScanner.hasNonWhitespaceChange, isTrue);
  }

  void test_replace_operator_oneForMany() {
    // "a + b;"
    // "a * c - b;"
    _scan("a ", "+", "* c -", " b;");
    _assertTokens(0, 4, ["a", "*", "c", "-", "b", ";"]);
    expect(_incrementalScanner.hasNonWhitespaceChange, isTrue);
  }

  void test_replace_operator_oneForOne() {
    // "a + b;"
    // "a * b;"
    _scan("a ", "+", "*", " b;");
    _assertTokens(0, 2, ["a", "*", "b", ";"]);
    expect(_incrementalScanner.hasNonWhitespaceChange, isTrue);
  }

  /**
   * Assert that the comments associated with the token at the given [index]
   * have lexemes that match the given list of lexemes, both in number and in
   * content.
   */
  void _assertComments(int index, List<String> lexemes) {
    Token token = _incrementalTokens;
    for (int i = 0; i < index; i++) {
      token = token.next;
    }
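    // Comments that precede a token are not part of the main token stream;
    // they hang off the following token as a chain starting at
    // precedingComments.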
    Token comment = token.precedingComments;
    if (lexemes.isEmpty) {
      expect(comment, isNull,
          reason: "No comments expected but comments found");
    }
    int count = 0;
    for (String lexeme in lexemes) {
      if (comment == null) {
        fail("Expected ${lexemes.length} comments but found $count");
      }
      expect(comment.lexeme, lexeme);
      count++;
      comment = comment.next;
    }
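    // Any comments remaining beyond the expected lexemes are unexpected;
    // count them so the failure message reports how many were actually found.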
    if (comment != null) {
      while (comment != null) {
        count++;
        comment = comment.next;
      }
      fail("Expected ${lexemes.length} comments but found $count");
    }
  }

  /**
   * Assert that the [expected] token is equal to the [actual] token.
   */
  void _assertEqualTokens(Token actual, Token expected) {
    expect(actual.type, same(expected.type), reason: "Wrong type for token");
    expect(actual.lexeme, expected.lexeme, reason: "Wrong lexeme for token");
    expect(actual.offset, expected.offset,
        reason:
            "Wrong offset for token ('${actual.lexeme}' != '${expected.lexeme}')");
    expect(actual.length, expected.length,
        reason:
            "Wrong length for token ('${actual.lexeme}' != '${expected.lexeme}')");
  }

  /**
   * Assert that the result of the incremental scan matches the given list of
   * [lexemes] and that the left and right tokens correspond to the tokens at
   * the [leftIndex] and [rightIndex].
   */
  void _assertTokens(int leftIndex, int rightIndex, List<String> lexemes) {
    int count = lexemes.length;
    expect(leftIndex >= -1 && leftIndex < count, isTrue,
        reason: "Invalid left index");
    expect(rightIndex >= 0 && rightIndex <= count, isTrue,
        reason: "Invalid right index");
    Token leftToken = null;
    Token rightToken = null;
    Token token = _incrementalTokens;
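    // A leftIndex of -1 means that the expected left token is the token that
    // precedes the first token in the stream.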
    if (leftIndex < 0) {
      leftToken = token.previous;
    }
    for (int i = 0; i < count; i++) {
      expect(token.lexeme, lexemes[i]);
      if (i == leftIndex) {
        leftToken = token;
      }
      if (i == rightIndex) {
        rightToken = token;
      }
      token = token.next;
    }
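    // After the loop, [token] is the EOF token, so a rightIndex equal to the
    // number of lexemes selects the EOF token as the expected right token.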
    if (rightIndex >= count) {
      rightToken = token;
    }
    expect(token.type, same(TokenType.EOF), reason: "Too many tokens");
    if (leftIndex >= 0) {
      expect(leftToken, isNotNull);
    }
    expect(_incrementalScanner.leftToken, same(leftToken),
        reason: "Invalid left token");
    if (rightIndex >= 0) {
      expect(rightToken, isNotNull);
    }
    expect(_incrementalScanner.rightToken, same(rightToken),
        reason: "Invalid right token");
  }

  /**
   * Given a description of the original and modified contents, perform an
   * incremental scan of the two pieces of text. Verify that the incremental
   * scan produced the same tokens as those that would be produced by a full
   * scan of the new contents.
   *
   * The original content is the concatenation of the [prefix], [removed] and
   * [suffix] fragments. The modified content is the concatenation of the
   * [prefix], [added] and [suffix] fragments.
   */
  void _scan(String prefix, String removed, String added, String suffix) {
    //
    // Compute the information needed to perform the test.
    //
    String originalContents = "$prefix$removed$suffix";
    String modifiedContents = "$prefix$added$suffix";
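    // The edit region starts immediately after the common prefix.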
    int replaceStart = prefix.length;
    Source source = new TestSource();
    //
    // Scan the original contents.
    //
    GatheringErrorListener originalListener = new GatheringErrorListener();
    Scanner originalScanner = new Scanner(
        source, new CharSequenceReader(originalContents), originalListener);
    _originalTokens = originalScanner.tokenize();
    expect(_originalTokens, isNotNull);
    //
    // Scan the modified contents.
    //
    GatheringErrorListener modifiedListener = new GatheringErrorListener();
    Scanner modifiedScanner = new Scanner(
        source, new CharSequenceReader(modifiedContents), modifiedListener);
    Token modifiedTokens = modifiedScanner.tokenize();
    expect(modifiedTokens, isNotNull);
    //
    // Incrementally scan the modified contents.
    //
    GatheringErrorListener incrementalListener = new GatheringErrorListener();
    AnalysisOptionsImpl options = new AnalysisOptionsImpl();
    _incrementalScanner = new IncrementalScanner(source,
        new CharSequenceReader(modifiedContents), incrementalListener, options);
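    // Rescan the modified region, identified by the offset of the first
    // changed character and the number of characters removed and added.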
    _incrementalTokens = _incrementalScanner.rescan(
        _originalTokens, replaceStart, removed.length, added.length);
    //
    // Validate that the results of the incremental scan are the same as the
    // full scan of the modified source.
    //
    Token incrementalToken = _incrementalTokens;
    expect(incrementalToken, isNotNull);
    while (incrementalToken.type != TokenType.EOF &&
        modifiedTokens.type != TokenType.EOF) {
      _assertEqualTokens(incrementalToken, modifiedTokens);
      Token incrementalComment = incrementalToken.precedingComments;
      Token modifiedComment = modifiedTokens.precedingComments;
      while (incrementalComment != null && modifiedComment != null) {
        _assertEqualTokens(incrementalComment, modifiedComment);
        incrementalComment = incrementalComment.next;
        modifiedComment = modifiedComment.next;
      }
      expect(incrementalComment, isNull,
          reason:
              "Too many comment tokens preceding '${incrementalToken.lexeme}'");
      expect(modifiedComment, isNull,
          reason:
              "Not enough comment tokens preceding '${incrementalToken.lexeme}'");
      incrementalToken = incrementalToken.next;
      modifiedTokens = modifiedTokens.next;
    }
    expect(incrementalToken.type, same(TokenType.EOF),
        reason: "Too many tokens");
    expect(modifiedTokens.type, same(TokenType.EOF),
        reason: "Not enough tokens");
    // TODO(brianwilkerson) Verify that the errors are correct?
  }
}