Chromium Code Reviews

Side by Side Diff: analyzer/lib/src/generated/incremental_scanner.dart

Issue 1400473008: Roll Observatory packages and add a roll script (Closed) Base URL: git@github.com:dart-lang/observatory_pub_packages.git@master
Patch Set: Created 5 years, 2 months ago
// Copyright (c) 2014, the Dart project authors. Please see the AUTHORS file
// for details. All rights reserved. Use of this source code is governed by a
// BSD-style license that can be found in the LICENSE file.

library engine.incremental_scanner;

import "dart:math" as math;

import 'package:analyzer/src/generated/engine.dart';

import 'error.dart';
import 'scanner.dart';
import 'source.dart';
import 'utilities_collection.dart' show TokenMap;

/**
 * An `IncrementalScanner` is a scanner that scans a subset of a string and
 * inserts the resulting tokens into the middle of an existing token stream.
 */
class IncrementalScanner {
  /**
   * The source being scanned.
   */
  final Source source;

  /**
   * The reader used to access the characters in the source.
   */
  final CharacterReader reader;

  /**
   * The error listener that will be informed of any errors that are found
   * during the scan.
   *
   * TODO(brianwilkerson) Replace this with a list of errors so that we can
   * update the errors.
   */
  final AnalysisErrorListener errorListener;

  final AnalysisOptions _options;

  /**
   * A map from tokens that were copied to the copies of the tokens.
   */
  TokenMap _tokenMap = new TokenMap();

  /**
   * The token immediately to the left of the range of tokens that were
   * modified.
   */
  Token leftToken;

  /**
   * The token immediately to the right of the range of tokens that were
   * modified.
   */
  Token rightToken;

  /**
   * A flag indicating whether any non-comment tokens were changed (other than
   * having their positions updated) as a result of the modification.
   */
  bool hasNonWhitespaceChange = false;

  /**
   * Initialize a newly created scanner to scan characters within the given
   * [source]. The content of the source can be read using the given [reader].
   * Any errors that are found will be reported to the given [errorListener].
   * The given [_options] determine how the scanning is performed.
   */
  IncrementalScanner(
      this.source, this.reader, this.errorListener, this._options);

  /**
   * Return a map from tokens that were copied to the copies of the tokens.
   */
  TokenMap get tokenMap => _tokenMap;

  /**
   * Given the [stream] of tokens scanned from the original source, the
   * modified source (the result of replacing one contiguous range of
   * characters with another string of characters), and a specification of the
   * modification that was made, update the token stream to reflect the
   * modified source. Return the first token in the updated token stream.
   *
   * The [stream] is expected to be the first non-EOF token in the token
   * stream.
   *
   * The modification is specified by the [index] of the first character in
   * both the original and modified source that was affected by the
   * modification, the number of characters removed from the original source
   * (the [removedLength]) and the number of characters added to the modified
   * source (the [insertedLength]).
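   *
   * For example, for the edit "a; c;" --> "a;b c;" (the same edit used in the
   * comments in the method body below), the [index] would be 2, the
   * [removedLength] would be 0, and the [insertedLength] would be 1.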
   */
  Token rescan(Token stream, int index, int removedLength, int insertedLength) {
    Token leftEof = stream.previous;
    //
    // Compute the delta between the character index of characters after the
    // modified region in the original source and the index of the
    // corresponding character in the modified source.
    //
    int delta = insertedLength - removedLength;
    //
    // Skip past the tokens whose end is less than the replacement start. (If
    // the replacement start is equal to the end of an existing token, then it
    // means that the existing token might have been modified, so we need to
    // rescan it.)
    //
    while (stream.type != TokenType.EOF && stream.end < index) {
      _tokenMap.put(stream, stream);
      stream = stream.next;
    }
    Token oldFirst = stream;
    Token oldLeftToken = stream.previous;
    leftToken = oldLeftToken;
    //
    // Skip past tokens until we find a token whose offset is greater than the
    // end of the removed region. (If the end of the removed region is equal
    // to the beginning of an existing token, then it means that the existing
    // token might have been modified, so we need to rescan it.)
    //
    int removedEnd = index + (removedLength == 0 ? 0 : removedLength - 1);
    while (stream.type != TokenType.EOF && stream.offset <= removedEnd) {
      stream = stream.next;
    }
    //
    // Figure out which region of characters actually needs to be re-scanned.
    //
    Token oldLast;
    Token oldRightToken;
    if (stream.type != TokenType.EOF && removedEnd + 1 == stream.offset) {
      oldLast = stream;
      stream = stream.next;
      oldRightToken = stream;
    } else {
      oldLast = stream.previous;
      oldRightToken = stream;
    }
    //
    // Compute the range of characters that are known to need to be rescanned.
    // If the index is within an existing token, then we need to start at the
    // beginning of the token.
    //
    int scanStart = math.max(oldLeftToken.end, 0);
    int scanEnd = oldRightToken.offset + delta;
    //
    // Rescan the characters that need to be rescanned.
    //
    Token replacementStart = _scanRange(scanStart, scanEnd);
    oldLeftToken.setNext(replacementStart);
    Token replacementEnd = _findEof(replacementStart).previous;
    replacementEnd.setNext(stream);
    //
    // Apply the delta to the tokens after the last new token.
    //
    _updateOffsets(stream, delta);
    rightToken = stream;
    //
    // If the index is immediately after an existing token and the inserted
    // characters did not change that original token, then adjust the
    // leftToken to be the next token. For example, in "a; c;" --> "a;b c;",
    // the leftToken was ";", but this code advances it to "b" since "b" is
    // the first new token.
    //
    Token newFirst = leftToken.next;
    while (!identical(newFirst, rightToken) &&
        !identical(oldFirst, oldRightToken) &&
        newFirst.type != TokenType.EOF &&
        _equalTokens(oldFirst, newFirst)) {
      _tokenMap.put(oldFirst, newFirst);
      oldLeftToken = oldFirst;
      oldFirst = oldFirst.next;
      leftToken = newFirst;
      newFirst = newFirst.next;
    }
    Token newLast = rightToken.previous;
    while (!identical(newLast, leftToken) &&
        !identical(oldLast, oldLeftToken) &&
        newLast.type != TokenType.EOF &&
        _equalTokens(oldLast, newLast)) {
      _tokenMap.put(oldLast, newLast);
      oldRightToken = oldLast;
      oldLast = oldLast.previous;
      rightToken = newLast;
      newLast = newLast.previous;
    }
    hasNonWhitespaceChange = !identical(leftToken.next, rightToken) ||
        !identical(oldLeftToken.next, oldRightToken);
    //
    // TODO(brianwilkerson) Begin tokens are not getting associated with the
    // corresponding end tokens (because the end tokens have not been copied
    // when we're copying the begin tokens). This could have implications for
    // parsing.
    // TODO(brianwilkerson) Update the lineInfo.
    //
    return leftEof.next;
  }

  /**
   * Return `true` if the [oldToken] and the [newToken] are equal to each
   * other. For the purposes of the incremental scanner, two tokens are equal
   * if they have the same type and lexeme.
   */
  bool _equalTokens(Token oldToken, Token newToken) =>
      oldToken.type == newToken.type &&
      oldToken.length == newToken.length &&
      oldToken.lexeme == newToken.lexeme;

  /**
   * Given a [token], return the EOF token that follows the token.
   */
  Token _findEof(Token token) {
    while (token.type != TokenType.EOF) {
      token = token.next;
    }
    return token;
  }

  /**
   * Scan the tokens between the [start] (inclusive) and [end] (exclusive)
   * offsets, returning the first token scanned.
   */
  Token _scanRange(int start, int end) {
    Scanner scanner = new Scanner(
        source, new CharacterRangeReader(reader, start, end), errorListener);
    return scanner.tokenize();
  }

  /**
   * Update the offsets of every token from the given [token] to the end of
   * the stream by adding the given [delta].
   */
  void _updateOffsets(Token token, int delta) {
    while (token.type != TokenType.EOF) {
      _tokenMap.put(token, token);
      token.offset += delta;
      Token comment = token.precedingComments;
      while (comment != null) {
        comment.offset += delta;
        comment = comment.next;
      }
      token = token.next;
    }
    _tokenMap.put(token, token);
    token.offset += delta;
  }
}
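
For context, the sketch below shows one plausible way this scanner is driven, using the same "a; c;" --> "a;b c;" edit that the comments in rescan refer to. It is an illustration only, not part of this change: the import paths, StringSource, CharSequenceReader, AnalysisErrorListener.NULL_LISTENER, and AnalysisOptionsImpl are assumed from the analyzer package of this era and may differ.

// Minimal usage sketch (not part of this CL). Assumes the analyzer package
// of the same vintage as this file; in particular StringSource,
// CharSequenceReader, AnalysisErrorListener.NULL_LISTENER and
// AnalysisOptionsImpl are assumed to be available at these import paths.
import 'package:analyzer/src/generated/engine.dart';
import 'package:analyzer/src/generated/error.dart';
import 'package:analyzer/src/generated/incremental_scanner.dart';
import 'package:analyzer/src/generated/scanner.dart';
import 'package:analyzer/src/generated/source.dart';
import 'package:analyzer/src/string_source.dart';

void main() {
  String oldCode = 'a; c;';
  String newCode = 'a;b c;'; // "b" inserted at offset 2.
  AnalysisErrorListener listener = AnalysisErrorListener.NULL_LISTENER;
  Source source = new StringSource(newCode, '/example.dart');

  // Scan the original source once to produce the existing token stream.
  Token originalStream =
      new Scanner(source, new CharSequenceReader(oldCode), listener)
          .tokenize();

  // Rescan only the modified region: index 2, 0 characters removed,
  // 1 character inserted. The reader passed here must read the *modified*
  // source, because the scanner reads character ranges out of it.
  IncrementalScanner incrementalScanner = new IncrementalScanner(source,
      new CharSequenceReader(newCode), listener, new AnalysisOptionsImpl());
  Token updatedStream = incrementalScanner.rescan(originalStream, 2, 0, 1);

  print(updatedStream.lexeme); // "a" - the head of the updated stream.
  print(incrementalScanner.hasNonWhitespaceChange); // true - "b" is new.
}

The token stream passed to rescan comes from scanning the original source, while the reader handed to the IncrementalScanner reads the modified source; rescan splices the freshly scanned tokens into the stream between leftToken and rightToken and shifts the offsets of the tokens that follow by the edit's delta.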