// Copyright (c) 2014, the Dart project authors. Please see the AUTHORS file
// for details. All rights reserved. Use of this source code is governed by a
// BSD-style license that can be found in the LICENSE file.

@deprecated
library analyzer.src.generated.incremental_scanner;

import "dart:math" as math;

import 'package:analyzer/dart/ast/token.dart';
import 'package:analyzer/src/dart/scanner/reader.dart';
import 'package:analyzer/src/dart/scanner/scanner.dart';
import 'package:analyzer/src/generated/engine.dart';
import 'package:analyzer/src/generated/error.dart';
import 'package:analyzer/src/generated/source.dart';
import 'package:analyzer/src/generated/utilities_collection.dart' show TokenMap;

/**
 * An `IncrementalScanner` is a scanner that scans a subset of a string and
 * inserts the resulting tokens into the middle of an existing token stream.
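 *
 * A minimal usage sketch of the intended flow: scan the original content,
 * then rescan after an edit. The names `source`, `originalReader`,
 * `modifiedReader`, `errorListener`, `options`, `index`, `removedLength`,
 * and `insertedLength` are placeholders assumed to be provided by the caller:
 *
 *     // Tokens scanned from the original content.
 *     Token originalStream =
 *         new Scanner(source, originalReader, errorListener).tokenize();
 *     // Rescan after replacing removedLength characters at offset index
 *     // with insertedLength new characters.
 *     IncrementalScanner scanner = new IncrementalScanner(
 *         source, modifiedReader, errorListener, options);
 *     Token updatedStream =
 *         scanner.rescan(originalStream, index, removedLength, insertedLength);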
 */
@deprecated
class IncrementalScanner {
  /**
   * The source being scanned.
   */
  final Source source;

  /**
   * The reader used to access the characters in the source.
   */
  final CharacterReader reader;

  /**
   * The error listener that will be informed of any errors that are found
   * during the scan.
   *
   * TODO(brianwilkerson) Replace this with a list of errors so that we can
   * update the errors.
   */
  final AnalysisErrorListener errorListener;

  final AnalysisOptions _options;

  /**
   * A map from tokens that were copied to the copies of the tokens.
   */
  TokenMap _tokenMap = new TokenMap();

  /**
   * The token immediately to the left of the range of tokens that were
   * modified.
   */
  Token leftToken;

  /**
   * The token immediately to the right of the range of tokens that were
   * modified.
   */
  Token rightToken;

  /**
   * A flag indicating whether there were any non-comment tokens changed (other
   * than having their position updated) as a result of the modification.
   */
  bool hasNonWhitespaceChange = false;

  /**
   * Initialize a newly created scanner to scan characters within the given
   * [source]. The content of the source can be read using the given [reader].
   * Any errors that are found will be reported to the given [errorListener].
   * [_options] will determine how scanning is to be performed.
   */
  IncrementalScanner(
      this.source, this.reader, this.errorListener, this._options);

  /**
   * Return a map from tokens that were copied to the copies of the tokens.
   *
   * @return a map from tokens that were copied to the copies of the tokens
   */
  TokenMap get tokenMap => _tokenMap;

  /**
   * Given the [stream] of tokens scanned from the original source, the modified
   * source (the result of replacing one contiguous range of characters with
   * another string of characters), and a specification of the modification that
   * was made, update the token stream to reflect the modified source. Return
   * the first token in the updated token stream.
   *
   * The [stream] is expected to be the first non-EOF token in the token stream.
   *
   * The modification is specified by the [index] of the first character in both
   * the original and modified source that was affected by the modification, the
   * number of characters removed from the original source (the [removedLength])
   * and the number of characters added to the modified source (the
   * [insertedLength]).
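   *
   * For example (an illustrative edit, not taken from this library): changing
   * `"a + b;"` into `"a + x + y;"` by replacing the single character `"b"` at
   * offset 4 with the five characters `"x + y"` is described by `index = 4`,
   * `removedLength = 1` and `insertedLength = 5`, so the `";"` token is
   * shifted from offset 5 to offset 9.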
   */
  Token rescan(Token stream, int index, int removedLength, int insertedLength) {
    Token leftEof = stream.previous;
    //
    // Compute the delta between the character index of characters after the
    // modified region in the original source and the index of the corresponding
    // character in the modified source.
    //
    int delta = insertedLength - removedLength;
    //
    // Skip past the tokens whose end is less than the replacement start. (If
    // the replacement start is equal to the end of an existing token, then it
    // means that the existing token might have been modified, so we need to
    // rescan it.)
    //
    while (stream.type != TokenType.EOF && stream.end < index) {
      _tokenMap.put(stream, stream);
      stream = stream.next;
    }
    Token oldFirst = stream;
    Token oldLeftToken = stream.previous;
    leftToken = oldLeftToken;
    //
    // Skip past tokens until we find a token whose offset is greater than the
    // end of the removed region. (If the end of the removed region is equal to
    // the beginning of an existing token, then it means that the existing token
    // might have been modified, so we need to rescan it.)
    //
    int removedEnd = index + (removedLength == 0 ? 0 : removedLength - 1);
    while (stream.type != TokenType.EOF && stream.offset <= removedEnd) {
      stream = stream.next;
    }
    //
    // Figure out which region of characters actually needs to be re-scanned.
    //
    Token oldLast;
    Token oldRightToken;
    if (stream.type != TokenType.EOF && removedEnd + 1 == stream.offset) {
      oldLast = stream;
      stream = stream.next;
      oldRightToken = stream;
    } else {
      oldLast = stream.previous;
      oldRightToken = stream;
    }
    //
    // Compute the range of characters that are known to need to be rescanned.
    // If the index is within an existing token, then we need to start at the
    // beginning of the token.
    //
    int scanStart = math.max(oldLeftToken.end, 0);
    int scanEnd = oldRightToken.offset + delta;
    //
    // Rescan the characters that need to be rescanned.
    //
    Token replacementStart = _scanRange(scanStart, scanEnd);
    oldLeftToken.setNext(replacementStart);
    Token replacementEnd = _findEof(replacementStart).previous;
    replacementEnd.setNext(stream);
    //
    // Apply the delta to the tokens after the last new token.
    //
    _updateOffsets(stream, delta);
    rightToken = stream;
    //
    // If the index is immediately after an existing token and the inserted
    // characters did not change that original token, then adjust the leftToken
    // to be the next token. For example, in "a; c;" --> "a;b c;", the leftToken
    // was ";", but this code advances it to "b" since "b" is the first new
    // token.
    //
    Token newFirst = leftToken.next;
    while (!identical(newFirst, rightToken) &&
        !identical(oldFirst, oldRightToken) &&
        newFirst.type != TokenType.EOF &&
        _equalTokens(oldFirst, newFirst)) {
      _tokenMap.put(oldFirst, newFirst);
      oldLeftToken = oldFirst;
      oldFirst = oldFirst.next;
      leftToken = newFirst;
      newFirst = newFirst.next;
    }
    Token newLast = rightToken.previous;
    while (!identical(newLast, leftToken) &&
        !identical(oldLast, oldLeftToken) &&
        newLast.type != TokenType.EOF &&
        _equalTokens(oldLast, newLast)) {
      _tokenMap.put(oldLast, newLast);
      oldRightToken = oldLast;
      oldLast = oldLast.previous;
      rightToken = newLast;
      newLast = newLast.previous;
    }
    hasNonWhitespaceChange = !identical(leftToken.next, rightToken) ||
        !identical(oldLeftToken.next, oldRightToken);
    //
    // TODO(brianwilkerson) Begin tokens are not getting associated with the
    // corresponding end tokens (because the end tokens have not been copied
    // when we're copying the begin tokens). This could have implications for
    // parsing.
    // TODO(brianwilkerson) Update the lineInfo.
    //
    return leftEof.next;
  }

  /**
   * Return `true` if the [oldToken] and the [newToken] are equal to each other.
   * For the purposes of the incremental scanner, two tokens are equal if they
   * have the same type and lexeme.
   */
  bool _equalTokens(Token oldToken, Token newToken) =>
      oldToken.type == newToken.type &&
      oldToken.length == newToken.length &&
      oldToken.lexeme == newToken.lexeme;

  /**
   * Given a [token], return the EOF token that follows the token.
   */
  Token _findEof(Token token) {
    while (token.type != TokenType.EOF) {
      token = token.next;
    }
    return token;
  }

  /**
   * Scan the tokens between the [start] (inclusive) and [end] (exclusive)
   * offsets.
   */
  Token _scanRange(int start, int end) {
    Scanner scanner = new Scanner(
        source, new CharacterRangeReader(reader, start, end), errorListener);
    return scanner.tokenize();
  }

  /**
   * Update the offsets of every token from the given [token] to the end of the
   * stream by adding the given [delta].
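   *
   * For example, with a [delta] of 1, a token at offset 5 (and any comment
   * tokens preceding it) ends up at offset 6.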
   */
  void _updateOffsets(Token token, int delta) {
    while (token.type != TokenType.EOF) {
      _tokenMap.put(token, token);
      token.offset += delta;
      Token comment = token.precedingComments;
      while (comment != null) {
        comment.offset += delta;
        comment = comment.next;
      }
      token = token.next;
    }
    _tokenMap.put(token, token);
    token.offset += delta;
  }
}
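
// The helper below is an illustrative sketch only; it was not part of the
// original library, and the name `computeEditSpec` is hypothetical. It shows
// one way a caller could derive the arguments to [IncrementalScanner.rescan]
// (the index of the first affected character, the number of characters
// removed, and the number of characters inserted) from the original and
// modified source strings, by trimming their common prefix and suffix.
List<int> computeEditSpec(String original, String modified) {
  // Length of the longest common prefix of the two strings.
  int prefix = 0;
  int maxLength = math.min(original.length, modified.length);
  while (prefix < maxLength &&
      original.codeUnitAt(prefix) == modified.codeUnitAt(prefix)) {
    prefix++;
  }
  // Length of the longest common suffix that does not overlap the prefix.
  int suffix = 0;
  while (suffix < maxLength - prefix &&
      original.codeUnitAt(original.length - 1 - suffix) ==
          modified.codeUnitAt(modified.length - 1 - suffix)) {
    suffix++;
  }
  int index = prefix;
  int removedLength = original.length - prefix - suffix;
  int insertedLength = modified.length - prefix - suffix;
  return <int>[index, removedLength, insertedLength];
}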