Chromium Code Reviews

Side by Side Diff: mojo/public/dart/third_party/analyzer/lib/src/task/driver.dart

Issue 1346773002: Stop running pub get at gclient sync time and fix build bugs (Closed) Base URL: git@github.com:domokit/mojo.git@master
Patch Set: Created 5 years, 3 months ago
1 // Copyright (c) 2015, the Dart project authors. Please see the AUTHORS file
2 // for details. All rights reserved. Use of this source code is governed by a
3 // BSD-style license that can be found in the LICENSE file.
4
5 library analyzer.src.task.driver;
6
7 import 'dart:async';
8 import 'dart:collection';
9
10 import 'package:analyzer/src/context/cache.dart';
11 import 'package:analyzer/src/generated/engine.dart'
12 hide AnalysisTask, AnalysisContextImpl, WorkManager;
13 import 'package:analyzer/src/generated/java_engine.dart';
14 import 'package:analyzer/src/generated/resolver.dart';
15 import 'package:analyzer/src/generated/utilities_general.dart';
16 import 'package:analyzer/src/task/inputs.dart';
17 import 'package:analyzer/src/task/manager.dart';
18 import 'package:analyzer/task/model.dart';
19
20 final PerformanceTag workOrderMoveNextPerfTag =
21 new PerformanceTag('WorkOrder.moveNext');
22
23 /**
24 * An object that is used to cause analysis to be performed until all of the
25 * required analysis information has been computed.
26 */
27 class AnalysisDriver {
28 /**
29 * The task manager used to figure out how to compute analysis results.
30 */
31 final TaskManager taskManager;
32
33 /**
34 * The list of [WorkManager] used to figure out which analysis results to
35 * compute.
36 */
37 final List<WorkManager> workManagers;
38
39 /**
40 * The context in which analysis is to be performed.
41 */
42 final InternalAnalysisContext context;
43
44 /**
45 * The map of [ComputedResult] controllers.
46 */
47 final Map<ResultDescriptor,
48 StreamController<ComputedResult>> resultComputedControllers =
49 <ResultDescriptor, StreamController<ComputedResult>>{};
50
51 /**
52 * The work order that was previously computed but that has not yet been
53 * completed.
54 */
55 WorkOrder currentWorkOrder;
56
57 /**
58 * Indicates whether any tasks are currently being performed (or building
59 * their inputs). In debug builds, we use this to ensure that tasks don't
60 * try to make use of the task manager in reentrant fashion.
61 */
62 bool isTaskRunning = false;
63
64 /**
65 * The controller that is notified when a task is started.
66 */
67 StreamController<AnalysisTask> _onTaskStartedController;
68
69 /**
70 * The controller that is notified when a task is complete.
71 */
72 StreamController<AnalysisTask> _onTaskCompletedController;
73
74 /**
75 * Initialize a newly created driver to use the tasks known to the given
76 * [taskManager] to perform analysis in the given [context].
77 */
78 AnalysisDriver(this.taskManager, this.workManagers, this.context) {
79 _onTaskStartedController = new StreamController.broadcast();
80 _onTaskCompletedController = new StreamController.broadcast();
81 }
82
83 /**
84 * The stream that is notified when a task is complete.
85 */
86 Stream<AnalysisTask> get onTaskCompleted => _onTaskCompletedController.stream;
87
88 /**
89 * The stream that is notified when a task is started.
90 */
91 Stream<AnalysisTask> get onTaskStarted => _onTaskStartedController.stream;
92
93 /**
94 * Perform work until the given [result] has been computed for the given
95 * [target]. Return the last [AnalysisTask] that was performed.
96 */
97 AnalysisTask computeResult(AnalysisTarget target, ResultDescriptor result) {
98 assert(!isTaskRunning);
99 try {
100 isTaskRunning = true;
101 AnalysisTask task;
102 WorkOrder workOrder = createWorkOrderForResult(target, result);
103 if (workOrder != null) {
104 while (workOrder.moveNext()) {
105 task = performWorkItem(workOrder.current);
106 }
107 }
108 return task;
109 } finally {
110 isTaskRunning = false;
111 }
112 }
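For illustration, a minimal sketch of a caller driving a single result to completion; the helper name is hypothetical, and the imports at the top of this file are assumed:

AnalysisTask computeSingleResult(
    AnalysisDriver driver, AnalysisTarget target, ResultDescriptor result) {
  AnalysisTask lastTask = driver.computeResult(target, result);
  // A null return means no work order was created because the cached state
  // was already VALID (or ERROR / IN_PROCESS); otherwise this is the last
  // task that was performed.
  return lastTask;
}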
113
114 /**
115 * Return the work order describing the work that should be performed next,
116 * or `null` if there is currently no work to be done.
117 */
118 WorkOrder createNextWorkOrder() {
119 while (true) {
120 // Find the WorkManager with the highest priority.
121 WorkOrderPriority highestPriority = null;
122 WorkManager highestManager = null;
123 for (WorkManager manager in workManagers) {
124 WorkOrderPriority priority = manager.getNextResultPriority();
125 if (highestPriority == null || highestPriority.index > priority.index) {
126 highestPriority = priority;
127 highestManager = manager;
128 }
129 }
130 // Nothing to do.
131 if (highestPriority == WorkOrderPriority.NONE) {
132 return null;
133 }
134 // Create a new WorkOrder.
135 TargetedResult request = highestManager.getNextResult();
136 // print('request: $request');
137 if (request != null) {
138 WorkOrder workOrder =
139 createWorkOrderForResult(request.target, request.result);
140 if (workOrder != null) {
141 return workOrder;
142 }
143 }
144 }
145 }
146
147 /**
148 * Create a work order that will produce the given [result] for the given
149 * [target]. Return the work order that was created, or `null` if the result
150 * has already been computed.
151 */
152 WorkOrder createWorkOrderForResult(
153 AnalysisTarget target, ResultDescriptor result) {
154 CacheEntry entry = context.getCacheEntry(target);
155 CacheState state = entry.getState(result);
156 if (state == CacheState.VALID ||
157 state == CacheState.ERROR ||
158 state == CacheState.IN_PROCESS) {
159 return null;
160 }
161 TaskDescriptor taskDescriptor = taskManager.findTask(target, result);
162 try {
163 WorkItem workItem = new WorkItem(context, target, taskDescriptor, result);
164 return new WorkOrder(taskManager, workItem);
165 } catch (exception, stackTrace) {
166 throw new AnalysisException(
167 'Could not create work order (target = $target; taskDescriptor = $taskDescriptor; result = $result)',
168 new CaughtException(exception, stackTrace));
169 }
170 }
171
172 /**
173 * Create a work order that will produce the required analysis results for
174 * the given [target]. If [isPriority] is true, then the target is a priority
175 * target. Return the work order that was created, or `null` if there is no
176 * further work that needs to be done for the given target.
177 */
178 WorkOrder createWorkOrderForTarget(AnalysisTarget target, bool isPriority) {
179 for (ResultDescriptor result in taskManager.generalResults) {
180 WorkOrder workOrder = createWorkOrderForResult(target, result);
181 if (workOrder != null) {
182 return workOrder;
183 }
184 }
185 if (isPriority) {
186 for (ResultDescriptor result in taskManager.priorityResults) {
187 WorkOrder workOrder = createWorkOrderForResult(target, result);
188 if (workOrder != null) {
189 return workOrder;
190 }
191 }
192 }
193 return null;
194 }
195
196 /**
197 * Return the stream that is notified when a new value for the given
198 * [descriptor] is computed.
199 */
200 Stream<ComputedResult> onResultComputed(ResultDescriptor descriptor) {
201 return resultComputedControllers
202 .putIfAbsent(descriptor,
203 () => new StreamController<ComputedResult>.broadcast(sync: true))
204 .stream;
205 }
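For illustration, a minimal sketch of subscribing to these notifications; the function name is hypothetical, and it assumes [ComputedResult] exposes, as fields, the constructor arguments used in [performWorkItem] below:

StreamSubscription<ComputedResult> watchResult(
    AnalysisDriver driver, ResultDescriptor descriptor) {
  return driver.onResultComputed(descriptor).listen((ComputedResult event) {
    // The controller is a synchronous broadcast stream, so this fires as
    // soon as a new value is recorded for the descriptor.
    print('computed ${event.descriptor} for ${event.target}: ${event.value}');
  });
}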
206
207 /**
208 * Perform the next analysis task, and return `true` if there is more work to
209 * be done in order to compute all of the required analysis information.
210 */
211 bool performAnalysisTask() {
212 //
213 // TODO(brianwilkerson) This implementation does not allow us to prioritize
214 // work across contexts. What we need is a way for an external client to ask
215 // to have all priority files analyzed for each context, then ask for normal
216 // files to be analyzed. There are a couple of ways to do this.
217 //
218 // First, we could add a "bool priorityOnly" parameter to this method and
219 // return null here when it is true.
220 //
221 // Second, we could add a concept of a priority order and (externally) run
222 // through the priorities from highest to lowest. That would be a nice
223 // generalization of the previous idea, but it isn't clear that we need the
224 // generality.
225 //
226 // Third, we could move performAnalysisTask and createNextWorkOrder to an
227 // object that knows about all sources in all contexts, so that instead of
228 // the client choosing a context and telling it do to some work, the client
229 // simply says "do some work", and the engine chooses the best thing to do
230 // next regardless of what context it's in.
231 //
232 assert(!isTaskRunning);
233 try {
234 isTaskRunning = true;
235 if (currentWorkOrder == null) {
236 currentWorkOrder = createNextWorkOrder();
237 } else if (currentWorkOrder.moveNext()) {
238 performWorkItem(currentWorkOrder.current);
239 } else {
240 currentWorkOrder = createNextWorkOrder();
241 }
242 return currentWorkOrder != null;
243 } finally {
244 isTaskRunning = false;
245 }
246 }
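For illustration, a minimal sketch of the outer pump loop a client might run; the function name is hypothetical, and it relies only on [performAnalysisTask] returning `true` while more work remains:

void pumpUntilDone(AnalysisDriver driver) {
  // Each call performs at most one work item (or creates the next work
  // order); stop once no further work orders can be created.
  while (driver.performAnalysisTask()) {}
}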
247
248 /**
249 * Perform the given work item.
250 * Return the performed [AnalysisTask].
251 */
252 AnalysisTask performWorkItem(WorkItem item) {
253 if (item.exception != null) {
254 // Mark all of the results that the task would have computed as being in
255 // ERROR with the exception recorded on the work item.
256 CacheEntry targetEntry = context.getCacheEntry(item.target);
257 targetEntry.setErrorState(item.exception, item.descriptor.results);
258 return null;
259 }
260 // Otherwise, perform the task.
261 AnalysisTask task = item.buildTask();
262 _onTaskStartedController.add(task);
263 task.perform();
264 AnalysisTarget target = task.target;
265 CacheEntry entry = context.getCacheEntry(target);
266 if (task.caughtException == null) {
267 List<TargetedResult> dependedOn = item.inputTargetedResults.toList();
268 Map<ResultDescriptor, dynamic> outputs = task.outputs;
269 for (ResultDescriptor result in task.descriptor.results) {
270 // TODO(brianwilkerson) We could check here that a value was produced
271 // and throw an exception if not (unless we want to allow null values).
272 entry.setValue(result, outputs[result], dependedOn);
273 }
274 outputs.forEach((ResultDescriptor descriptor, value) {
275 StreamController<ComputedResult> controller =
276 resultComputedControllers[descriptor];
277 if (controller != null) {
278 ComputedResult event =
279 new ComputedResult(context, descriptor, target, value);
280 controller.add(event);
281 }
282 });
283 for (WorkManager manager in workManagers) {
284 manager.resultsComputed(target, outputs);
285 }
286 } else {
287 entry.setErrorState(task.caughtException, item.descriptor.results);
288 }
289 _onTaskCompletedController.add(task);
290 return task;
291 }
292
293 /**
294 * Reset the state of the driver in response to a change in the state of one
295 * or more analysis targets. This will cause any analysis that was currently
296 * in process to be stopped and analysis to resume based on the new state.
297 */
298 void reset() {
299 currentWorkOrder = null;
300 }
301 }
302
303 /**
304 * Generic dependency walker suitable for use in the analysis task driver.
305 * This class implements a variant of the path-based strong component algorithm
306 * (described here: http://www.cs.colorado.edu/~hal/Papers/DFS/ipl.ps.gz), with
307 * the following differences:
308 *
309 * - The algorithm is non-recursive so that it can be used in a coroutine
310 * fashion (each call to [getNextStronglyConnectedComponent] computes a
311 * single strongly connected component and then waits to be called again)
312 *
313 * - Instead of keeping a temporary array which maps nodes to their locations
314 * in the [path] array, we simply search the array when necessary. This
315 * allows us to begin finding strongly connected components without having to
316 * know the size of the whole graph.
317 *
318 * - This algorithm does not compute all strongly connected components; only
319 * those reachable from the starting point which are as yet unevaluated.
320 *
321 * The algorithm, in essence, is to traverse the dependency graph in
322 * depth-first fashion from a starting node. If the path from the starting
323 * node ever encounters the same node twice, then a cycle has been found, and
324 * all the nodes comprising the cycle are (conceptually) contracted into a
325 * single node. The algorithm yields possibly-collapsed nodes in proper
326 * topological sort order (all the dependencies of a node are yielded before,
327 * or in the same contracted component as, the node itself).
328 */
329 abstract class CycleAwareDependencyWalker<Node> {
330 /**
331 * The path through the dependency graph that is currently being considered,
332 * with un-collapsed nodes.
333 */
334 final List<Node> _path;
335
336 /**
337 * For each node in [_path], a list of the unevaluated nodes which it is
338 * already known to depend on.
339 */
340 final List<List<Node>> _provisionalDependencies;
341
342 /**
343 * Indices into [_path] of the nodes which begin a new strongly connected
344 * component, in order. The first index in [_contractedPath] is always 0.
345 *
346 * For all i < contractedPath.length - 1, at least one node in the strongly
347 * connected component represented by [contractedPath[i]] depends directly
348 * on at least one node in the strongly connected component represented by
349 * [contractedPath[i+1]].
350 */
351 final List<int> _contractedPath;
352
353 /**
354 * Indices into [_path] of the nodes which we are currently in the process of
355 * querying for their dependencies.
356 *
357 * [currentIndices.last] always refers to a member of the last strongly
358 * connected component indicated by [_contractedPath].
359 */
360 final List<int> _currentIndices;
361
362 /**
363 * Begin walking dependencies starting at [startingNode].
364 */
365 CycleAwareDependencyWalker(Node startingNode)
366 : _path = <Node>[startingNode],
367 _provisionalDependencies = <List<Node>>[<Node>[]],
368 _contractedPath = <int>[0],
369 _currentIndices = <int>[0];
370
371 /**
372 * Determine the next unevaluated input for [node], skipping any inputs in
373 * [skipInputs], and return it. If [node] has no further inputs, return
374 * `null`.
375 */
376 Node getNextInput(Node node, List<Node> skipInputs);
377
378 /**
379 * Determine the next strongly connected component in the graph, and return
380 * it. The client is expected to evaluate this component before calling
381 * [getNextStronglyConnectedComponent] again.
382 */
383 StronglyConnectedComponent<Node> getNextStronglyConnectedComponent() {
384 while (_currentIndices.isNotEmpty) {
385 Node nextUnevaluatedInput = getNextInput(_path[_currentIndices.last],
386 _provisionalDependencies[_currentIndices.last]);
387 assert(!_provisionalDependencies[_currentIndices.last]
388 .contains(nextUnevaluatedInput));
389 if (nextUnevaluatedInput != null) {
390 // TODO(paulberry): the call to _path.indexOf makes the algorithm
391 // O(n^2) in the depth of the dependency graph. If this becomes a
392 // problem, consider maintaining a map from node to index.
393 int previousIndex = _path.indexOf(nextUnevaluatedInput);
394 if (previousIndex != -1) {
395 // Update contractedPath to indicate that all nodes in the path
396 // between previousIndex and currentIndex are part of the same
397 // strongly connected component.
398 while (_contractedPath.last > previousIndex) {
399 _contractedPath.removeLast();
400 }
401 // Store nextUnevaluatedInput as a provisional dependency so that we
402 // can move on to computing other dependencies.
403 _provisionalDependencies[_currentIndices.last]
404 .add(nextUnevaluatedInput);
405 // And loop to move on to the node's next input.
406 continue;
407 } else {
408 // This is a brand new input and there's no reason (yet) to believe
409 // that it is in the same strongly connected component as any other
410 // node, so push it onto the end of the path.
411 int newIndex = _path.length;
412 _path.add(nextUnevaluatedInput);
413 _provisionalDependencies.add(<Node>[]);
414 _contractedPath.add(newIndex);
415 _currentIndices.add(newIndex);
416 // And loop to move on to the new node's inputs.
417 continue;
418 }
419 } else {
420 // The node has no more inputs. Figure out if there are any more nodes
421 // in the current strongly connected component that need to have their
422 // inputs examined.
423 _currentIndices.removeLast();
424 if (_currentIndices.isEmpty ||
425 _currentIndices.last < _contractedPath.last) {
426 // No more nodes in the current strongly connected component need to
427 // have their inputs examined. We can now yield this component to
428 // the caller.
429 List<Node> nodes = _path.sublist(_contractedPath.last);
430 bool containsCycle = nodes.length > 1;
431 if (!containsCycle) {
432 if (_provisionalDependencies.last.isNotEmpty) {
433 containsCycle = true;
434 }
435 }
436 _path.length = _contractedPath.last;
437 _provisionalDependencies.length = _contractedPath.last;
438 _contractedPath.removeLast();
439 return new StronglyConnectedComponent<Node>(nodes, containsCycle);
440 } else {
441 // At least one node in the current strongly connected component
442 // still needs to have its inputs examined. So loop and allow the
443 // inputs to be examined.
444 continue;
445 }
446 }
447 }
448 // No further strongly connected components found.
449 return null;
450 }
451 }
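For illustration, a minimal sketch of a concrete walker over a small in-memory graph; the class and function names are hypothetical, nodes are plain ints, and the caller is expected to mark each yielded component as evaluated before requesting the next one:

class MapGraphWalker extends CycleAwareDependencyWalker<int> {
  final Map<int, List<int>> edges;
  final Set<int> evaluated = new Set<int>();

  MapGraphWalker(this.edges, int startingNode) : super(startingNode);

  @override
  int getNextInput(int node, List<int> skipInputs) {
    List<int> dependencies = edges[node];
    if (dependencies != null) {
      for (int dependency in dependencies) {
        if (!evaluated.contains(dependency) &&
            !skipInputs.contains(dependency)) {
          return dependency;
        }
      }
    }
    // No remaining unevaluated inputs for this node.
    return null;
  }
}

void walkFrom(Map<int, List<int>> edges, int start) {
  MapGraphWalker walker = new MapGraphWalker(edges, start);
  StronglyConnectedComponent<int> component =
      walker.getNextStronglyConnectedComponent();
  while (component != null) {
    // Components are yielded in topological order; evaluate this one before
    // asking for the next.
    walker.evaluated.addAll(component.nodes);
    print('${component.nodes} (cycle: ${component.containsCycle})');
    component = walker.getNextStronglyConnectedComponent();
  }
}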
452
453 /**
454 * A place to define the behaviors that need to be added to
455 * [InternalAnalysisContext].
456 */
457 abstract class ExtendedAnalysisContext implements InternalAnalysisContext {
458 List<AnalysisTarget> get explicitTargets;
459 List<AnalysisTarget> get priorityTargets;
460 void set typeProvider(TypeProvider typeProvider);
461 CacheEntry getCacheEntry(AnalysisTarget target);
462 }
463
464 /**
465 * An exception indicating that an attempt was made to perform a task on a
466 * target while gathering the inputs to perform the same task for the same
467 * target.
468 */
469 class InfiniteTaskLoopException extends AnalysisException {
470 /**
471 * If a dependency cycle was found while computing the inputs for the task,
472 * the set of [WorkItem]s contained in the cycle (if there are overlapping
473 * cycles, this is the set of all [WorkItem]s in the entire strongly
474 * connected component). Otherwise, `null`.
475 */
476 final List<WorkItem> dependencyCycle;
477
478 /**
479 * Initialize a newly created exception to represent a failed attempt to
480 * perform the given [task] due to the given [dependencyCycle].
481 */
482 InfiniteTaskLoopException(AnalysisTask task, this.dependencyCycle)
483 : super(
484 'Infinite loop while performing task ${task.descriptor.name} for ${task.target}');
485 }
486
487 /**
488 * Object used by CycleAwareDependencyWalker to report a single strongly
489 * connected component of nodes.
490 */
491 class StronglyConnectedComponent<Node> {
492 /**
493 * The nodes contained in the strongly connected component.
494 */
495 final List<Node> nodes;
496
497 /**
498 * Indicates whether the strongly connected component contains any cycles. Note that
499 * if [nodes] has multiple elements, this will always be `true`. However, if
500 * [nodes] has exactly one element, this may be either `true` or `false`
501 * depending on whether the node has a dependency on itself.
502 */
503 final bool containsCycle;
504
505 StronglyConnectedComponent(this.nodes, this.containsCycle);
506 }
507
508 /**
509 * A description of a single analysis task that can be performed to advance
510 * analysis.
511 */
512 class WorkItem {
513 /**
514 * The context in which the task will be performed.
515 */
516 final InternalAnalysisContext context;
517
518 /**
519 * The target for which a task is to be performed.
520 */
521 final AnalysisTarget target;
522
523 /**
524 * A description of the task to be performed.
525 */
526 final TaskDescriptor descriptor;
527
528 /**
529 * The [ResultDescriptor] which led to this work item being spawned.
530 */
531 final ResultDescriptor spawningResult;
532
533 /**
534 * An iterator used to iterate over the descriptors of the inputs to the task,
535 * or `null` if all of the inputs have been collected and the task can be
536 * created.
537 */
538 TaskInputBuilder builder;
539
540 /**
541 * The [TargetedResult]s that the outputs of this task depend on.
542 */
543 final HashSet<TargetedResult> inputTargetedResults =
544 new HashSet<TargetedResult>();
545
546 /**
547 * The inputs to the task that have been computed.
548 */
549 Map<String, dynamic> inputs;
550
551 /**
552 * The exception that was found while trying to populate the inputs. If this
553 * field is non-`null`, then the task cannot be performed and all of the
554 * results that this task would have computed need to be marked as being in
555 * ERROR with this exception.
556 */
557 CaughtException exception = null;
558
559 /**
560 * If a dependency cycle was found while computing the inputs for the task,
561 * the set of [WorkItem]s contained in the cycle (if there are overlapping
562 * cycles, this is the set of all [WorkItem]s in the entire strongly
563 * connected component). Otherwise, `null`.
564 */
565 List<WorkItem> dependencyCycle;
566
567 /**
568 * Initialize a newly created work item to compute the inputs for the task
569 * described by the given descriptor.
570 */
571 WorkItem(this.context, this.target, this.descriptor, this.spawningResult) {
572 AnalysisTarget actualTarget =
573 identical(target, AnalysisContextTarget.request)
574 ? new AnalysisContextTarget(context)
575 : target;
576 Map<String, TaskInput> inputDescriptors =
577 descriptor.createTaskInputs(actualTarget);
578 builder = new TopLevelTaskInputBuilder(inputDescriptors);
579 if (!builder.moveNext()) {
580 builder = null;
581 }
582 inputs = new HashMap<String, dynamic>();
583 }
584
585 @override
586 int get hashCode =>
587 JenkinsSmiHash.hash2(descriptor.hashCode, target.hashCode);
588
589 @override
590 bool operator ==(other) {
591 if (other is WorkItem) {
592 return this.descriptor == other.descriptor && this.target == other.target;
593 } else {
594 return false;
595 }
596 }
597
598 /**
599 * Build the task represented by this work item.
600 */
601 AnalysisTask buildTask() {
602 if (builder != null) {
603 throw new StateError("some inputs have not been computed");
604 }
605 AnalysisTask task = descriptor.createTask(context, target, inputs);
606 task.dependencyCycle = dependencyCycle;
607 return task;
608 }
609
610 /**
611 * Gather all of the inputs needed to perform the task.
612 *
613 * If at least one of the inputs has not yet been computed, return a work
614 * item that can be used to generate that input to indicate that the caller
615 * should perform the returned item's task before returning to gathering
616 * inputs for this item's task.
617 *
618 * If all of the inputs have been gathered, return `null` to indicate that the
619 * client should build and perform the task. A value of `null` will also be
620 * returned if some of the inputs cannot be computed and the task cannot be
621 * performed. Callers can differentiate between these cases by checking the
622 * [exception] field. If the field is `null`, then the task can be performed;
623 * if the field is non-`null` then the task cannot be performed and all of the
624 * task's results should be marked as being in ERROR.
625 */
626 WorkItem gatherInputs(TaskManager taskManager, List<WorkItem> skipInputs) {
627 while (builder != null) {
628 AnalysisTarget inputTarget = builder.currentTarget;
629 ResultDescriptor inputResult = builder.currentResult;
630 inputTargetedResults.add(new TargetedResult(inputTarget, inputResult));
631 CacheEntry inputEntry = context.getCacheEntry(inputTarget);
632 CacheState inputState = inputEntry.getState(inputResult);
633 if (skipInputs.any((WorkItem item) =>
634 item.target == inputTarget && item.spawningResult == inputResult)) {
635 // This input is being skipped due to a circular dependency. Tell the
636 // builder that it's not available so we can move on to other inputs.
637 builder.currentValueNotAvailable();
638 } else if (inputState == CacheState.ERROR) {
639 exception = inputEntry.exception;
640 return null;
641 } else if (inputState == CacheState.IN_PROCESS) {
642 //
643 // TODO(brianwilkerson) Implement this case.
644 //
645 // One possibility would be to return a WorkItem that would perform a
646 // no-op task in order to cause us to come back to this work item on the
647 // next iteration. It would be more efficient, in general, to push this
648 // input onto a waiting list and proceed to the next input so that work
649 // could proceed, but given that the only result that can currently be
650 // IN_PROCESS is CONTENT, I don't know that it's worth the extra effort
651 // to implement the general solution at this point.
652 //
653 throw new UnimplementedError();
654 } else if (inputState != CacheState.VALID) {
655 try {
656 TaskDescriptor descriptor =
657 taskManager.findTask(inputTarget, inputResult);
658 return new WorkItem(context, inputTarget, descriptor, inputResult);
659 } on AnalysisException catch (exception, stackTrace) {
660 this.exception = new CaughtException(exception, stackTrace);
661 return null;
662 }
663 } else {
664 builder.currentValue = inputEntry.getValue(inputResult);
665 }
666 if (!builder.moveNext()) {
667 inputs = builder.inputValue;
668 builder = null;
669 }
670 }
671 return null;
672 }
673
674 @override
675 String toString() => 'Run $descriptor on $target';
676 }
677
678 /**
679 * A description of the work to be done to compute a desired analysis result.
680 * The class implements a lazy depth-first traversal of the work item's inputs.
681 */
682 class WorkOrder implements Iterator<WorkItem> {
683 /**
684 * The dependency walker which is being used to determine what work to do
685 * next.
686 */
687 final _WorkOrderDependencyWalker _dependencyWalker;
688
689 /**
690 * The strongly connected component most recently returned by
691 * [_dependencyWalker], minus any [WorkItem]s that the iterator has already
692 * moved past.
693 *
694 * Null if the [_dependencyWalker] hasn't been used yet.
695 */
696 List<WorkItem> currentItems;
697
698 /**
699 * Initialize a newly created work order to compute the result described by
700 * the given work item.
701 */
702 WorkOrder(TaskManager taskManager, WorkItem item)
703 : _dependencyWalker = new _WorkOrderDependencyWalker(taskManager, item);
704
705 @override
706 WorkItem get current {
707 if (currentItems == null) {
708 return null;
709 } else {
710 return currentItems.last;
711 }
712 }
713
714 @override
715 bool moveNext() {
716 return workOrderMoveNextPerfTag.makeCurrentWhile(() {
717 if (currentItems != null && currentItems.length > 1) {
718 // Yield more items.
719 currentItems.removeLast();
720 return true;
721 } else {
722 // Get a new strongly connected component.
723 StronglyConnectedComponent<WorkItem> nextStronglyConnectedComponent =
724 _dependencyWalker.getNextStronglyConnectedComponent();
725 if (nextStronglyConnectedComponent == null) {
726 currentItems = null;
727 return false;
728 }
729 currentItems = nextStronglyConnectedComponent.nodes;
730 if (nextStronglyConnectedComponent.containsCycle) {
731 // A cycle has been found.
732 for (WorkItem item in currentItems) {
733 item.dependencyCycle = currentItems.toList();
734 }
735 } else {
736 assert(currentItems.length == 1);
737 }
738 return true;
739 }
740 });
741 }
742 }
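For illustration, a minimal sketch of consuming a [WorkOrder] through its [Iterator] interface, mirroring the loop in [AnalysisDriver.computeResult] above; the `perform` callback is a hypothetical stand-in for whatever actually runs each item:

void drainWorkOrder(WorkOrder workOrder, AnalysisTask perform(WorkItem item)) {
  while (workOrder.moveNext()) {
    // Dependencies outside of a cycle are yielded before the items that need
    // them; items participating in a cycle have their dependencyCycle set.
    perform(workOrder.current);
  }
}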
743
744 /**
745 * Specialization of [CycleAwareDependencyWalker] for use by [WorkOrder].
746 */
747 class _WorkOrderDependencyWalker extends CycleAwareDependencyWalker<WorkItem> {
748 /**
749 * The task manager used to build work items.
750 */
751 final TaskManager taskManager;
752
753 _WorkOrderDependencyWalker(this.taskManager, WorkItem startingNode)
754 : super(startingNode);
755
756 @override
757 WorkItem getNextInput(WorkItem node, List<WorkItem> skipInputs) {
758 return node.gatherInputs(taskManager, skipInputs);
759 }
760 }