Chromium Code Reviews| Index: pkg/analyzer/lib/src/dart/analysis/driver.dart |
| diff --git a/pkg/analyzer/lib/src/dart/analysis/driver.dart b/pkg/analyzer/lib/src/dart/analysis/driver.dart |
| index ee355c454854ac61277e113963616698cd569a26..e00b661ceb0d00c517c0db4405859dec105b5737 100644 |
| --- a/pkg/analyzer/lib/src/dart/analysis/driver.dart |
| +++ b/pkg/analyzer/lib/src/dart/analysis/driver.dart |
| @@ -22,6 +22,7 @@ import 'package:analyzer/src/summary/idl.dart'; |
| import 'package:analyzer/src/summary/link.dart'; |
| import 'package:analyzer/src/summary/package_bundle_reader.dart'; |
| import 'package:analyzer/src/summary/summarize_elements.dart'; |
| +import 'package:meta/meta.dart'; |
| /** |
| * This class computes [AnalysisResult]s for Dart files. |
| @@ -68,7 +69,20 @@ class AnalysisDriver { |
| */ |
| static const int DATA_VERSION = 1; |
| + /** |
| + * The name of the driver, e.g. the name of the folder. |
| + */ |
| String name; |
| + |
| + /** |
| + * The scheduler that schedules analysis work in this, and possibly other |
|
Paul Berry
2016/11/08 22:43:26
s/possible/possibly/
scheglov
2016/11/09 01:54:50
Done.
|
| + * analysis drivers. |
| + */ |
| + final AnalysisDriverScheduler _scheduler; |
| + |
| + /** |
| + * The logger to write performed operations and performance to. |
| + */ |
| final PerformanceLog _logger; |
| /** |
| @@ -149,9 +163,9 @@ class AnalysisDriver { |
| final _dependencySignatureMap = <Uri, String>{}; |
| /** |
| - * The monitor that is signalled when there is work to do. |
| + * The controller for the [results] stream. |
| */ |
| - final _Monitor _hasWork = new _Monitor(); |
| + final _resultController = new StreamController<AnalysisResult>(); |
| /** |
| * The controller for the [status] stream. |
| @@ -169,13 +183,20 @@ class AnalysisDriver { |
| * The given [SourceFactory] is cloned to ensure that it does not contain a |
| * reference to a [AnalysisContext] in which it could have been used. |
| */ |
| - AnalysisDriver(this._logger, this._resourceProvider, this._byteStore, |
| - this._contentOverlay, SourceFactory sourceFactory, this._analysisOptions) |
| + AnalysisDriver( |
| + this._scheduler, |
| + this._logger, |
| + this._resourceProvider, |
| + this._byteStore, |
| + this._contentOverlay, |
| + SourceFactory sourceFactory, |
| + this._analysisOptions) |
| : _sourceFactory = sourceFactory.clone() { |
| _fillSalt(); |
| _sdkBundle = sourceFactory.dartSdk.getLinkedBundle(); |
| _fsState = new FileSystemState(_logger, _byteStore, _contentOverlay, |
| _resourceProvider, _sourceFactory, _analysisOptions, _salt); |
| + _scheduler._add(this); |
| } |
| /** |
| @@ -191,19 +212,18 @@ class AnalysisDriver { |
| _priorityFiles.clear(); |
| _priorityFiles.addAll(priorityPaths); |
| _transitionToAnalyzing(); |
| - _hasWork.notify(); |
| + _scheduler._notify(this); |
| } |
| /** |
| * Return the [Stream] that produces [AnalysisResult]s for added files. |
| * |
| - * Analysis starts when the client starts listening to the stream, and stops |
| - * when the client cancels the subscription. Note that the stream supports |
| - * only one single subscriber. |
| + * Note that the stream supports only one single subscriber. |
| * |
| - * When the client starts listening, the analysis state transitions to |
| - * "analyzing" and an analysis result is produced for every added file prior |
| - * to the next time the analysis state transitions to "idle". |
| + * Analysis starts when the [AnalysisDriverScheduler] is started and the |
| + * driver is added to it. The analysis state transitions to "analyzing" and |
| + * an analysis result is produced for every added file prior to the next time |
| + * the analysis state transitions to "idle". |
| * |
| * At least one analysis result is produced for every file passed to |
| * [addFile] or [changeFile] prior to the next time the analysis state |
| @@ -217,97 +237,38 @@ class AnalysisDriver { |
| * Results might be produced even for files that have never been added |
| * using [addFile], for example when [getResult] was called for a file. |
| */ |
| - Stream<AnalysisResult> get results async* { |
| - try { |
| - PerformanceLogSection analysisSection = null; |
| - while (true) { |
| - // Pump the event queue to allow IO and other asynchronous data |
| - // processing while analysis is active. For example Analysis Server |
| - // needs to be able to process `updateContent` or `setPriorityFiles` |
| - // requests while background analysis is in progress. |
| - // |
| - // The number of pumpings is arbitrary, might be changed if we see that |
| - // analysis or other data processing tasks are starving. Ideally we |
| - // would need to be able to set priority of (continuous) asynchronous |
| - // tasks. |
| - await _pumpEventQueue(128); |
| - |
| - await _hasWork.signal; |
| - |
| - if (analysisSection == null) { |
| - analysisSection = _logger.enter('Analyzing'); |
| - } |
| + Stream<AnalysisResult> get results => _resultController.stream; |
| - // Verify all changed files one at a time. |
| - if (_changedFiles.isNotEmpty) { |
| - String path = _removeFirst(_changedFiles); |
| - _verifyApiSignature(path); |
| - // Repeat the processing loop. |
| - _hasWork.notify(); |
| - continue; |
| - } |
| - |
| - // Analyze a requested file. |
| - if (_requestedFiles.isNotEmpty) { |
| - String path = _requestedFiles.keys.first; |
| - AnalysisResult result = _computeAnalysisResult(path, withUnit: true); |
| - // Notify the completers. |
| - _requestedFiles.remove(path).forEach((completer) { |
| - completer.complete(result); |
| - }); |
| - // Remove from to be analyzed and produce it now. |
| - _filesToAnalyze.remove(path); |
| - yield result; |
| - // Repeat the processing loop. |
| - _hasWork.notify(); |
| - continue; |
| - } |
| - |
| - // Analyze a priority file. |
| - if (_priorityFiles.isNotEmpty) { |
| - bool analyzed = false; |
| - for (String path in _priorityFiles) { |
| - if (_filesToAnalyze.remove(path)) { |
| - analyzed = true; |
| - AnalysisResult result = |
| - _computeAnalysisResult(path, withUnit: true); |
| - yield result; |
| - break; |
| - } |
| - } |
| - // Repeat the processing loop. |
| - if (analyzed) { |
| - _hasWork.notify(); |
| - continue; |
| - } |
| - } |
| + /** |
| + * Return the stream that produces [AnalysisStatus] events. |
| + */ |
| + Stream<AnalysisStatus> get status => _statusController.stream; |
| - // Analyze a general file. |
| - if (_filesToAnalyze.isNotEmpty) { |
| - String path = _removeFirst(_filesToAnalyze); |
| - AnalysisResult result = _computeAnalysisResult(path, withUnit: false); |
| - yield result; |
| - // Repeat the processing loop. |
| - _hasWork.notify(); |
| - continue; |
| + /** |
| + * Return the priority of work that the driver needs to perform. |
| + */ |
| + AnalysisDriverPriority get _workPriority { |
| + if (_requestedFiles.isNotEmpty) { |
| + return AnalysisDriverPriority.interactive; |
| + } |
| + if (_priorityFiles.isNotEmpty) { |
| + for (String path in _priorityFiles) { |
| + if (_filesToAnalyze.contains(path)) { |
| + return AnalysisDriverPriority.priority; |
| } |
| - |
| - // There is nothing to do. |
| - analysisSection.exit(); |
| - analysisSection = null; |
| - _transitionToIdle(); |
| } |
| - } finally { |
| - print('The stream was cancelled.'); |
| } |
| + if (_filesToAnalyze.isNotEmpty) { |
| + return AnalysisDriverPriority.general; |
| + } |
| + if (_changedFiles.isNotEmpty) { |
| + return AnalysisDriverPriority.general; |
| + } |
| + _transitionToIdle(); |
| + return AnalysisDriverPriority.nothing; |
| } |
| /** |
| - * Return the stream that produces [AnalysisStatus] events. |
| - */ |
| - Stream<AnalysisStatus> get status => _statusController.stream; |
| - |
| - /** |
| * Add the file with the given [path] to the set of files to analyze. |
| * |
| * The [path] must be absolute and normalized. |
| @@ -320,7 +281,7 @@ class AnalysisDriver { |
| _filesToAnalyze.add(path); |
| } |
| _transitionToAnalyzing(); |
| - _hasWork.notify(); |
| + _scheduler._notify(this); |
| } |
| /** |
| @@ -349,7 +310,14 @@ class AnalysisDriver { |
| } |
| } |
| _transitionToAnalyzing(); |
| - _hasWork.notify(); |
| + _scheduler._notify(this); |
| + } |
| + |
| + /** |
| + * Notify the driver that the client is going to stop using it. |
| + */ |
| + void dispose() { |
| + _scheduler._remove(this); |
| } |
| /** |
| @@ -372,7 +340,7 @@ class AnalysisDriver { |
| .putIfAbsent(path, () => <Completer<AnalysisResult>>[]) |
| .add(completer); |
| _transitionToAnalyzing(); |
| - _hasWork.notify(); |
| + _scheduler._notify(this); |
| return completer.future; |
| } |
| @@ -675,6 +643,51 @@ class AnalysisDriver { |
| } |
| /** |
| + * Perform a single chunk of work and produce [results]. |
| + */ |
| + Future<Null> _performWork() async { |
| + // Verify all changed files one at a time. |
| + if (_changedFiles.isNotEmpty) { |
| + String path = _removeFirst(_changedFiles); |
| + _verifyApiSignature(path); |
| + return; |
| + } |
| + |
| + // Analyze a requested file. |
| + if (_requestedFiles.isNotEmpty) { |
| + String path = _requestedFiles.keys.first; |
| + AnalysisResult result = _computeAnalysisResult(path, withUnit: true); |
| + // Notify the completers. |
| + _requestedFiles.remove(path).forEach((completer) { |
| + completer.complete(result); |
| + }); |
| + // Remove from to be analyzed and produce it now. |
| + _filesToAnalyze.remove(path); |
| + _resultController.add(result); |
| + return; |
| + } |
| + |
| + // Analyze a priority file. |
| + if (_priorityFiles.isNotEmpty) { |
| + for (String path in _priorityFiles) { |
| + if (_filesToAnalyze.remove(path)) { |
| + AnalysisResult result = _computeAnalysisResult(path, withUnit: true); |
| + _resultController.add(result); |
| + return; |
| + } |
| + } |
| + } |
| + |
| + // Analyze a general file. |
| + if (_filesToAnalyze.isNotEmpty) { |
| + String path = _removeFirst(_filesToAnalyze); |
| + AnalysisResult result = _computeAnalysisResult(path, withUnit: false); |
| + _resultController.add(result); |
| + return; |
| + } |
| + } |
| + |
| + /** |
| * Send a notifications to the [status] stream that the driver started |
| * analyzing. |
| */ |
| @@ -721,6 +734,126 @@ class AnalysisDriver { |
| } |
| /** |
| + * Remove and return the first item in the given [set]. |
| + */ |
| + static Object/*=T*/ _removeFirst/*<T>*/(LinkedHashSet<Object/*=T*/ > set) { |
| + Object/*=T*/ element = set.first; |
| + set.remove(element); |
| + return element; |
| + } |
| +} |
| + |
| +/** |
| + * Priorities of [AnalysisDriver] work. The closer a priority to the beginning |
| + * of the list, the earlier the corresponding [AnalysisDriver] should be asked |
| + * to perform work. |
| + */ |
| +@visibleForTesting |
| +enum AnalysisDriverPriority { interactive, priority, general, nothing } |
| + |
| +/** |
| + * Instances of this class schedule work in multiple [AnalysisDriver]s so that |
| + * work with the highest priority is performed first. |
| + */ |
| +class AnalysisDriverScheduler { |
| + final PerformanceLog _logger; |
| + final List<AnalysisDriver> _drivers = []; |
| + final _Monitor _hasWork = new _Monitor(); |
| + |
| + bool _started = false; |
| + |
| + AnalysisDriverScheduler(this._logger); |
| + |
| + /** |
| + * Start the scheduler, so that any [AnalysisDriver] created before or |
| + * after will be asked to perform work. |
| + */ |
| + void start() { |
| + if (_started) { |
| + throw new StateError('The scheduler has already been started.'); |
| + } |
| + _started = true; |
| + _run(); |
| + } |
| + |
| + /** |
| + * Add the given [driver] and schedule it to perform its work. |
| + */ |
| + void _add(AnalysisDriver driver) { |
| + _drivers.add(driver); |
| + _hasWork.notify(); |
| + } |
| + |
| + /** |
| + * Notify that there is a change to the [driver], so it might need to |
| + * perform some work. |
| + */ |
| + void _notify(AnalysisDriver driver) { |
| + _hasWork.notify(); |
| + } |
| + |
| + /** |
| + * Remove the given [driver] from the scheduler, so that it will not be |
| + * asked to perform any new work. |
| + */ |
| + void _remove(AnalysisDriver driver) { |
| + _drivers.remove(driver); |
| + _hasWork.notify(); |
| + } |
| + |
| + /** |
| + * Run an infinite analysis cycle, selecting the drivers with the lowest |
| + * priority first. |
| + */ |
| + Future<Null> _run() async { |
| + PerformanceLogSection analysisSection; |
| + while (true) { |
| + // Pump the event queue to allow IO and other asynchronous data |
| + // processing while analysis is active. For example Analysis Server |
| + // needs to be able to process `updateContent` or `setPriorityFiles` |
| + // requests while background analysis is in progress. |
| + // |
| + // The number of pumpings is arbitrary, might be changed if we see that |
| + // analysis or other data processing tasks are starving. Ideally we |
| + // would need to be able to set priority of (continuous) asynchronous |
| + // tasks. |
| + await _pumpEventQueue(128); |
| + |
| + await _hasWork.signal; |
| + |
| + if (analysisSection == null) { |
| + analysisSection = _logger.enter('Analyzing'); |
| + } |
| + |
| + // Find the driver with the highest priority work. |
|
Paul Berry
2016/11/08 22:43:26
s/most/highest/
scheglov
2016/11/09 01:54:50
Done.
|
| + AnalysisDriver bestDriver; |
| + AnalysisDriverPriority bestDriverPriority; |
| + for (AnalysisDriver driver in _drivers) { |
| + AnalysisDriverPriority priority = driver._workPriority; |
| + if (bestDriverPriority == null || |
| + priority.index < bestDriverPriority.index) { |
| + bestDriver = driver; |
| + bestDriverPriority = priority; |
| + } |
| + } |
| + |
| + // Continue sleeping if there is no work to do. |
| + if (bestDriverPriority == null || |
| + bestDriverPriority == AnalysisDriverPriority.nothing) { |
| + analysisSection.exit(); |
| + analysisSection = null; |
| + continue; |
| + } |
| + |
| + // Ask the driver to perform a chunk of work. |
| + await bestDriver._performWork(); |
| + |
| + // Schedule one more cycle. |
| + _hasWork.notify(); |
| + } |
| + } |
| + |
| + /** |
| * Returns a [Future] that completes after performing [times] pumpings of |
| * the event queue. |
| */ |
| @@ -730,15 +863,6 @@ class AnalysisDriver { |
| } |
| return new Future.delayed(Duration.ZERO, () => _pumpEventQueue(times - 1)); |
| } |
| - |
| - /** |
| - * Remove and return the first item in the given [set]. |
| - */ |
| - static Object/*=T*/ _removeFirst/*<T>*/(LinkedHashSet<Object/*=T*/ > set) { |
| - Object/*=T*/ element = set.first; |
| - set.remove(element); |
| - return element; |
| - } |
| } |
| /** |