| 1 # |
|    2 # Copyright (c) 2001, 2002, 2003, 2004, 2005, 2006, 2007, 2008, 2009, 2010 The SCons Foundation |
| 3 # |
| 4 # Permission is hereby granted, free of charge, to any person obtaining |
| 5 # a copy of this software and associated documentation files (the |
| 6 # "Software"), to deal in the Software without restriction, including |
| 7 # without limitation the rights to use, copy, modify, merge, publish, |
| 8 # distribute, sublicense, and/or sell copies of the Software, and to |
| 9 # permit persons to whom the Software is furnished to do so, subject to |
| 10 # the following conditions: |
| 11 # |
| 12 # The above copyright notice and this permission notice shall be included |
| 13 # in all copies or substantial portions of the Software. |
| 14 # |
| 15 # THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY |
| 16 # KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE |
| 17 # WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND |
| 18 # NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE |
| 19 # LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION |
| 20 # OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION |
| 21 # WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. |
| 22 |
| 23 __doc__ = """ |
| 24 Generic Taskmaster module for the SCons build engine. |
| 25 |
| 26 This module contains the primary interface(s) between a wrapping user |
| 27 interface and the SCons build engine. There are two key classes here: |
| 28 |
| 29 Taskmaster |
| 30 This is the main engine for walking the dependency graph and |
| 31 calling things to decide what does or doesn't need to be built. |
| 32 |
| 33 Task |
| 34 This is the base class for allowing a wrapping interface to |
| 35 decide what does or doesn't actually need to be done. The |
| 36 intention is for a wrapping interface to subclass this as |
| 37 appropriate for different types of behavior it may need. |
| 38 |
| 39 The canonical example is the SCons native Python interface, |
| 40 which has Task subclasses that handle its specific behavior, |
| 41 like printing "`foo' is up to date" when a top-level target |
| 42 doesn't need to be built, and handling the -c option by removing |
| 43 targets as its "build" action. There is also a separate subclass |
| 44 for suppressing this output when the -q option is used. |
| 45 |
| 46 The Taskmaster instantiates a Task object for each (set of) |
| 47 target(s) that it decides need to be evaluated and/or built. |
| 48 """ |
| 49 |
| 50 __revision__ = "src/engine/SCons/Taskmaster.py 5134 2010/08/16 23:02:40 bdeegan" |
| 51 |
| 52 from itertools import chain |
| 53 import operator |
| 54 import sys |
| 55 import traceback |
| 56 |
| 57 import SCons.Errors |
| 58 import SCons.Node |
| 59 import SCons.Warnings |
| 60 |
| 61 StateString = SCons.Node.StateString |
| 62 NODE_NO_STATE = SCons.Node.no_state |
| 63 NODE_PENDING = SCons.Node.pending |
| 64 NODE_EXECUTING = SCons.Node.executing |
| 65 NODE_UP_TO_DATE = SCons.Node.up_to_date |
| 66 NODE_EXECUTED = SCons.Node.executed |
| 67 NODE_FAILED = SCons.Node.failed |
| 68 |
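|      # Illustrative sketch -- not part of the engine.  The hypothetical helper |
|      # below shows roughly how a wrapping interface (as described in the module |
|      # docstring above) is expected to drive the Task and Taskmaster classes |
|      # defined in this module; the SCons job runner implements serial and |
|      # parallel variants of this same loop. |
|      def _example_build_loop(taskmaster): |
|          """Drive a Taskmaster to completion, one Task at a time (sketch).""" |
|          while True: |
|              task = taskmaster.next_task() |
|              if task is None: |
|                  break |
|              try: |
|                  task.prepare() |
|                  if task.needs_execute(): |
|                      task.execute() |
|              except Exception: |
|                  # Record the error and let failed() decide whether the |
|                  # build stops or keeps going. |
|                  task.exception_set() |
|                  task.failed() |
|              else: |
|                  task.executed() |
|              task.postprocess() |
|          taskmaster.cleanup() |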
| 69 |
| 70 # A subsystem for recording stats about how different Nodes are handled by |
| 71 # the main Taskmaster loop. There's no external control here (no need for |
| 72 # a --debug= option); enable it by changing the value of CollectStats. |
| 73 |
| 74 CollectStats = None |
| 75 |
| 76 class Stats(object): |
| 77 """ |
| 78 A simple class for holding statistics about the disposition of a |
| 79 Node by the Taskmaster. If we're collecting statistics, each Node |
| 80 processed by the Taskmaster gets one of these attached, in which case |
| 81 the Taskmaster records its decision each time it processes the Node. |
| 82 (Ideally, that's just once per Node.) |
| 83 """ |
| 84 def __init__(self): |
| 85 """ |
| 86 Instantiates a Taskmaster.Stats object, initializing all |
| 87 appropriate counters to zero. |
| 88 """ |
| 89 self.considered = 0 |
| 90 self.already_handled = 0 |
| 91 self.problem = 0 |
| 92 self.child_failed = 0 |
| 93 self.not_built = 0 |
| 94 self.side_effects = 0 |
| 95 self.build = 0 |
| 96 |
| 97 StatsNodes = [] |
| 98 |
| 99 fmt = "%(considered)3d "\ |
| 100 "%(already_handled)3d " \ |
| 101 "%(problem)3d " \ |
| 102 "%(child_failed)3d " \ |
| 103 "%(not_built)3d " \ |
| 104 "%(side_effects)3d " \ |
| 105 "%(build)3d " |
| 106 |
| 107 def dump_stats(): |
| 108 for n in sorted(StatsNodes, key=lambda a: str(a)): |
| 109 print (fmt % n.stats.__dict__) + str(n) |
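|      |
|      # Example usage (illustrative; as the comment above says, the flag is |
|      # normally flipped by hand while debugging rather than via an option): |
|      # |
|      #     import SCons.Taskmaster |
|      #     SCons.Taskmaster.CollectStats = 1 |
|      #     ... run the build ... |
|      #     SCons.Taskmaster.dump_stats() |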
| 110 |
| 111 |
| 112 |
| 113 class Task(object): |
| 114 """ |
| 115 Default SCons build engine task. |
| 116 |
|  117     This controls the interaction between the actual building of a |
|  118     node and the rest of the engine. |
| 119 |
| 120 This is expected to handle all of the normally-customizable |
| 121 aspects of controlling a build, so any given application |
| 122 *should* be able to do what it wants by sub-classing this |
| 123 class and overriding methods as appropriate. If an application |
|  124     needs to customize something by sub-classing Taskmaster (or |
| 125 some other build engine class), we should first try to migrate |
| 126 that functionality into this class. |
| 127 |
| 128 Note that it's generally a good idea for sub-classes to call |
| 129 these methods explicitly to update state, etc., rather than |
| 130 roll their own interaction with Taskmaster from scratch. |
| 131 """ |
| 132 def __init__(self, tm, targets, top, node): |
| 133 self.tm = tm |
| 134 self.targets = targets |
| 135 self.top = top |
| 136 self.node = node |
| 137 self.exc_clear() |
| 138 |
| 139 def trace_message(self, method, node, description='node'): |
| 140 fmt = '%-20s %s %s\n' |
| 141 return fmt % (method + ':', description, self.tm.trace_node(node)) |
| 142 |
| 143 def display(self, message): |
| 144 """ |
| 145 Hook to allow the calling interface to display a message. |
| 146 |
| 147 This hook gets called as part of preparing a task for execution |
| 148 (that is, a Node to be built). As part of figuring out what Node |
|  149         should be built next, the actual target list may be altered, |
| 150 along with a message describing the alteration. The calling |
| 151 interface can subclass Task and provide a concrete implementation |
| 152 of this method to see those messages. |
| 153 """ |
| 154 pass |
| 155 |
| 156 def prepare(self): |
| 157 """ |
| 158 Called just before the task is executed. |
| 159 |
| 160 This is mainly intended to give the target Nodes a chance to |
| 161 unlink underlying files and make all necessary directories before |
| 162 the Action is actually called to build the targets. |
| 163 """ |
| 164 T = self.tm.trace |
| 165 if T: T.write(self.trace_message(u'Task.prepare()', self.node)) |
| 166 |
|  167         # Now that it's the appropriate time, give the Taskmaster a |
| 168 # chance to raise any exceptions it encountered while preparing |
| 169 # this task. |
| 170 self.exception_raise() |
| 171 |
| 172 if self.tm.message: |
| 173 self.display(self.tm.message) |
| 174 self.tm.message = None |
| 175 |
| 176 # Let the targets take care of any necessary preparations. |
| 177 # This includes verifying that all of the necessary sources |
| 178 # and dependencies exist, removing the target file(s), etc. |
| 179 # |
| 180 # As of April 2008, the get_executor().prepare() method makes |
| 181 # sure that all of the aggregate sources necessary to build this |
| 182 # Task's target(s) exist in one up-front check. The individual |
| 183 # target t.prepare() methods check that each target's explicit |
|  184         # or implicit dependencies exist, and also initialize the |
| 185 # .sconsign info. |
| 186 executor = self.targets[0].get_executor() |
| 187 executor.prepare() |
| 188 for t in executor.get_action_targets(): |
| 189 t.prepare() |
| 190 for s in t.side_effects: |
| 191 s.prepare() |
| 192 |
| 193 def get_target(self): |
| 194 """Fetch the target being built or updated by this task. |
| 195 """ |
| 196 return self.node |
| 197 |
| 198 def needs_execute(self): |
| 199 # TODO(deprecate): "return True" is the old default behavior; |
| 200 # change it to NotImplementedError (after running through the |
| 201 # Deprecation Cycle) so the desired behavior is explicitly |
| 202 # determined by which concrete subclass is used. |
| 203 #raise NotImplementedError |
| 204 msg = ('Taskmaster.Task is an abstract base class; instead of\n' |
| 205 '\tusing it directly, ' |
| 206 'derive from it and override the abstract methods.') |
| 207 SCons.Warnings.warn(SCons.Warnings.TaskmasterNeedsExecuteWarning, msg) |
| 208 return True |
| 209 |
| 210 def execute(self): |
| 211 """ |
| 212 Called to execute the task. |
| 213 |
| 214 This method is called from multiple threads in a parallel build, |
|  215         so only do thread-safe stuff here. Do thread-unsafe stuff in |
| 216 prepare(), executed() or failed(). |
| 217 """ |
| 218 T = self.tm.trace |
| 219 if T: T.write(self.trace_message(u'Task.execute()', self.node)) |
| 220 |
| 221 try: |
| 222 everything_was_cached = 1 |
| 223 for t in self.targets: |
| 224 if t.retrieve_from_cache(): |
| 225 # Call the .built() method without calling the |
| 226 # .push_to_cache() method, since we just got the |
| 227 # target from the cache and don't need to push |
| 228 # it back there. |
| 229 t.set_state(NODE_EXECUTED) |
| 230 t.built() |
| 231 else: |
| 232 everything_was_cached = 0 |
| 233 break |
| 234 if not everything_was_cached: |
| 235 self.targets[0].build() |
| 236 except SystemExit: |
| 237 exc_value = sys.exc_info()[1] |
| 238 raise SCons.Errors.ExplicitExit(self.targets[0], exc_value.code) |
| 239 except SCons.Errors.UserError: |
| 240 raise |
| 241 except SCons.Errors.BuildError: |
| 242 raise |
| 243 except Exception, e: |
| 244 buildError = SCons.Errors.convert_to_BuildError(e) |
| 245 buildError.node = self.targets[0] |
| 246 buildError.exc_info = sys.exc_info() |
| 247 raise buildError |
| 248 |
| 249 def executed_without_callbacks(self): |
| 250 """ |
| 251 Called when the task has been successfully executed |
| 252 and the Taskmaster instance doesn't want to call |
| 253 the Node's callback methods. |
| 254 """ |
| 255 T = self.tm.trace |
| 256 if T: T.write(self.trace_message('Task.executed_without_callbacks()', |
| 257 self.node)) |
| 258 |
| 259 for t in self.targets: |
| 260 if t.get_state() == NODE_EXECUTING: |
| 261 for side_effect in t.side_effects: |
| 262 side_effect.set_state(NODE_NO_STATE) |
| 263 t.set_state(NODE_EXECUTED) |
| 264 |
| 265 def executed_with_callbacks(self): |
| 266 """ |
| 267 Called when the task has been successfully executed and |
| 268 the Taskmaster instance wants to call the Node's callback |
| 269 methods. |
| 270 |
| 271 This may have been a do-nothing operation (to preserve build |
| 272 order), so we must check the node's state before deciding whether |
| 273 it was "built", in which case we call the appropriate Node method. |
| 274 In any event, we always call "visited()", which will handle any |
|  275         post-visit actions that must take place regardless of whether |
|  276         the target was an actually built target or a source Node. |
| 277 """ |
| 278 T = self.tm.trace |
| 279 if T: T.write(self.trace_message('Task.executed_with_callbacks()', |
| 280 self.node)) |
| 281 |
| 282 for t in self.targets: |
| 283 if t.get_state() == NODE_EXECUTING: |
| 284 for side_effect in t.side_effects: |
| 285 side_effect.set_state(NODE_NO_STATE) |
| 286 t.set_state(NODE_EXECUTED) |
| 287 t.push_to_cache() |
| 288 t.built() |
| 289 t.visited() |
| 290 |
| 291 executed = executed_with_callbacks |
| 292 |
| 293 def failed(self): |
| 294 """ |
| 295 Default action when a task fails: stop the build. |
| 296 |
| 297 Note: Although this function is normally invoked on nodes in |
| 298 the executing state, it might also be invoked on up-to-date |
| 299 nodes when using Configure(). |
| 300 """ |
| 301 self.fail_stop() |
| 302 |
| 303 def fail_stop(self): |
| 304 """ |
| 305 Explicit stop-the-build failure. |
| 306 |
| 307 This sets failure status on the target nodes and all of |
| 308 their dependent parent nodes. |
| 309 |
| 310 Note: Although this function is normally invoked on nodes in |
| 311 the executing state, it might also be invoked on up-to-date |
| 312 nodes when using Configure(). |
| 313 """ |
| 314 T = self.tm.trace |
|  315         if T: T.write(self.trace_message('Task.fail_stop()', self.node)) |
| 316 |
|  317         # Invoke will_not_build() to clean up the pending children |
|  318         # set. |
| 319 self.tm.will_not_build(self.targets, lambda n: n.set_state(NODE_FAILED)) |
| 320 |
| 321 # Tell the taskmaster to not start any new tasks |
| 322 self.tm.stop() |
| 323 |
| 324 # We're stopping because of a build failure, but give the |
| 325 # calling Task class a chance to postprocess() the top-level |
| 326 # target under which the build failure occurred. |
| 327 self.targets = [self.tm.current_top] |
| 328 self.top = 1 |
| 329 |
| 330 def fail_continue(self): |
| 331 """ |
| 332 Explicit continue-the-build failure. |
| 333 |
| 334 This sets failure status on the target nodes and all of |
| 335 their dependent parent nodes. |
| 336 |
| 337 Note: Although this function is normally invoked on nodes in |
| 338 the executing state, it might also be invoked on up-to-date |
| 339 nodes when using Configure(). |
| 340 """ |
| 341 T = self.tm.trace |
|  342         if T: T.write(self.trace_message('Task.fail_continue()', self.node)) |
| 343 |
| 344 self.tm.will_not_build(self.targets, lambda n: n.set_state(NODE_FAILED)) |
| 345 |
| 346 def make_ready_all(self): |
| 347 """ |
| 348 Marks all targets in a task ready for execution. |
| 349 |
| 350 This is used when the interface needs every target Node to be |
| 351 visited--the canonical example being the "scons -c" option. |
| 352 """ |
| 353 T = self.tm.trace |
| 354 if T: T.write(self.trace_message('Task.make_ready_all()', self.node)) |
| 355 |
| 356 self.out_of_date = self.targets[:] |
| 357 for t in self.targets: |
| 358 t.disambiguate().set_state(NODE_EXECUTING) |
| 359 for s in t.side_effects: |
| 360 # add disambiguate here to mirror the call on targets above |
| 361 s.disambiguate().set_state(NODE_EXECUTING) |
| 362 |
| 363 def make_ready_current(self): |
| 364 """ |
| 365 Marks all targets in a task ready for execution if any target |
| 366 is not current. |
| 367 |
| 368 This is the default behavior for building only what's necessary. |
| 369 """ |
| 370 T = self.tm.trace |
| 371 if T: T.write(self.trace_message(u'Task.make_ready_current()', |
| 372 self.node)) |
| 373 |
| 374 self.out_of_date = [] |
| 375 needs_executing = False |
| 376 for t in self.targets: |
| 377 try: |
| 378 t.disambiguate().make_ready() |
| 379 is_up_to_date = not t.has_builder() or \ |
| 380 (not t.always_build and t.is_up_to_date()) |
| 381 except EnvironmentError, e: |
|  382                 raise SCons.Errors.BuildError(node=t, errstr=e.strerror, filename=e.filename) |
| 383 |
| 384 if not is_up_to_date: |
| 385 self.out_of_date.append(t) |
| 386 needs_executing = True |
| 387 |
| 388 if needs_executing: |
| 389 for t in self.targets: |
| 390 t.set_state(NODE_EXECUTING) |
| 391 for s in t.side_effects: |
|  392                     # add disambiguate here to mirror the call on targets in first loop above |
| 393 s.disambiguate().set_state(NODE_EXECUTING) |
| 394 else: |
| 395 for t in self.targets: |
| 396 # We must invoke visited() to ensure that the node |
| 397 # information has been computed before allowing the |
| 398 # parent nodes to execute. (That could occur in a |
| 399 # parallel build...) |
| 400 t.visited() |
| 401 t.set_state(NODE_UP_TO_DATE) |
| 402 |
| 403 make_ready = make_ready_current |
| 404 |
| 405 def postprocess(self): |
| 406 """ |
| 407 Post-processes a task after it's been executed. |
| 408 |
| 409 This examines all the targets just built (or not, we don't care |
| 410 if the build was successful, or even if there was no build |
| 411 because everything was up-to-date) to see if they have any |
| 412 waiting parent Nodes, or Nodes waiting on a common side effect, |
| 413 that can be put back on the candidates list. |
| 414 """ |
| 415 T = self.tm.trace |
| 416 if T: T.write(self.trace_message(u'Task.postprocess()', self.node)) |
| 417 |
| 418 # We may have built multiple targets, some of which may have |
| 419 # common parents waiting for this build. Count up how many |
| 420 # targets each parent was waiting for so we can subtract the |
| 421 # values later, and so we *don't* put waiting side-effect Nodes |
| 422 # back on the candidates list if the Node is also a waiting |
| 423 # parent. |
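|              # Worked example (illustrative): if a parent P was waiting on two |
|              # of the targets built by this task, parents[P] ends up as 2 and |
|              # P.ref_count is decremented by 2 in the loop further down; P is |
|              # put back on the candidates list only when its ref_count reaches |
|              # zero. |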
| 424 |
| 425 targets = set(self.targets) |
| 426 |
| 427 pending_children = self.tm.pending_children |
| 428 parents = {} |
| 429 for t in targets: |
| 430 # A node can only be in the pending_children set if it has |
| 431 # some waiting_parents. |
| 432 if t.waiting_parents: |
| 433 if T: T.write(self.trace_message(u'Task.postprocess()', |
| 434 t, |
| 435 'removing')) |
| 436 pending_children.discard(t) |
| 437 for p in t.waiting_parents: |
| 438 parents[p] = parents.get(p, 0) + 1 |
| 439 |
| 440 for t in targets: |
| 441 for s in t.side_effects: |
| 442 if s.get_state() == NODE_EXECUTING: |
| 443 s.set_state(NODE_NO_STATE) |
| 444 for p in s.waiting_parents: |
| 445 parents[p] = parents.get(p, 0) + 1 |
| 446 for p in s.waiting_s_e: |
| 447 if p.ref_count == 0: |
| 448 self.tm.candidates.append(p) |
| 449 |
| 450 for p, subtract in parents.items(): |
| 451 p.ref_count = p.ref_count - subtract |
| 452 if T: T.write(self.trace_message(u'Task.postprocess()', |
| 453 p, |
| 454 'adjusted parent ref count')) |
| 455 if p.ref_count == 0: |
| 456 self.tm.candidates.append(p) |
| 457 |
| 458 for t in targets: |
| 459 t.postprocess() |
| 460 |
| 461 # Exception handling subsystem. |
| 462 # |
| 463 # Exceptions that occur while walking the DAG or examining Nodes |
| 464 # must be raised, but must be raised at an appropriate time and in |
| 465 # a controlled manner so we can, if necessary, recover gracefully, |
| 466 # possibly write out signature information for Nodes we've updated, |
| 467 # etc. This is done by having the Taskmaster tell us about the |
|  468     # exception (via exception_set()), and letting the Task raise it |
|          # later, when prepare() calls exception_raise(). |
| 469 |
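|      # For example: Taskmaster.next_task() catches an error raised while |
|      # getting a task ready, stores it with exception_set(), and the wrapping |
|      # interface then sees it raised when it calls prepare(), which invokes |
|      # exception_raise() below. |
|      |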
| 470 def exc_info(self): |
| 471 """ |
| 472 Returns info about a recorded exception. |
| 473 """ |
| 474 return self.exception |
| 475 |
| 476 def exc_clear(self): |
| 477 """ |
| 478 Clears any recorded exception. |
| 479 |
| 480 This also changes the "exception_raise" attribute to point |
| 481 to the appropriate do-nothing method. |
| 482 """ |
| 483 self.exception = (None, None, None) |
| 484 self.exception_raise = self._no_exception_to_raise |
| 485 |
| 486 def exception_set(self, exception=None): |
| 487 """ |
| 488 Records an exception to be raised at the appropriate time. |
| 489 |
| 490 This also changes the "exception_raise" attribute to point |
|  491         to the method that will, in fact, raise the recorded exception. |
| 492 """ |
| 493 if not exception: |
| 494 exception = sys.exc_info() |
| 495 self.exception = exception |
| 496 self.exception_raise = self._exception_raise |
| 497 |
| 498 def _no_exception_to_raise(self): |
| 499 pass |
| 500 |
| 501 def _exception_raise(self): |
| 502 """ |
| 503 Raises a pending exception that was recorded while getting a |
| 504 Task ready for execution. |
| 505 """ |
| 506 exc = self.exc_info()[:] |
| 507 try: |
| 508 exc_type, exc_value, exc_traceback = exc |
| 509 except ValueError: |
| 510 exc_type, exc_value = exc |
| 511 exc_traceback = None |
| 512 raise exc_type, exc_value, exc_traceback |
| 513 |
| 514 class AlwaysTask(Task): |
| 515 def needs_execute(self): |
| 516 """ |
| 517 Always returns True (indicating this Task should always |
| 518 be executed). |
| 519 |
| 520 Subclasses that need this behavior (as opposed to the default |
| 521 of only executing Nodes that are out of date w.r.t. their |
| 522 dependencies) can use this as follows: |
| 523 |
|  524             class MyTaskSubclass(SCons.Taskmaster.AlwaysTask): |
|  525                 pass |
| 526 """ |
| 527 return True |
| 528 |
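|      # Illustrative sketch (a hypothetical subclass, not part of SCons): an |
|      # interface that needs to visit every target whether or not it is out |
|      # of date -- the way "scons -c" does -- can combine AlwaysTask with |
|      # Task.make_ready_all, roughly like this: |
|      class _ExampleVisitEverythingTask(AlwaysTask): |
|          """Sketch of a Task that always executes and readies all targets.""" |
|      |
|          make_ready = Task.make_ready_all |
|      |
|          def execute(self): |
|              # A real wrapping interface would do its per-target work here; |
|              # this sketch just reports each target through the display hook. |
|              for t in self.targets: |
|                  self.display('visiting %s' % str(t)) |
|      |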
| 529 class OutOfDateTask(Task): |
| 530 def needs_execute(self): |
| 531 """ |
| 532 Returns True (indicating this Task should be executed) if this |
| 533 Task's target state indicates it needs executing, which has |
| 534 already been determined by an earlier up-to-date check. |
| 535 """ |
| 536 return self.targets[0].get_state() == SCons.Node.executing |
| 537 |
| 538 |
| 539 def find_cycle(stack, visited): |
| 540 if stack[-1] in visited: |
| 541 return None |
| 542 visited.add(stack[-1]) |
| 543 for n in stack[-1].waiting_parents: |
| 544 stack.append(n) |
| 545 if stack[0] == stack[-1]: |
| 546 return stack |
| 547 if find_cycle(stack, visited): |
| 548 return stack |
| 549 stack.pop() |
| 550 return None |
| 551 |
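|      # Illustrative check (the fake nodes below are stand-ins for real |
|      # SCons.Node objects; find_cycle() only needs the waiting_parents |
|      # attribute) showing what the function returns for a two-node cycle: |
|      # |
|      #     class FakeNode(object): |
|      #         def __init__(self, name): |
|      #             self.name = name |
|      #             self.waiting_parents = set() |
|      #         def __str__(self): |
|      #             return self.name |
|      # |
|      #     a, b = FakeNode('a'), FakeNode('b') |
|      #     a.waiting_parents.add(b) |
|      #     b.waiting_parents.add(a) |
|      #     find_cycle([a], set())     # returns [a, b, a], i.e. a -> b -> a |
|      |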
| 552 |
| 553 class Taskmaster(object): |
| 554 """ |
| 555 The Taskmaster for walking the dependency DAG. |
| 556 """ |
| 557 |
| 558 def __init__(self, targets=[], tasker=None, order=None, trace=None): |
| 559 self.original_top = targets |
| 560 self.top_targets_left = targets[:] |
| 561 self.top_targets_left.reverse() |
| 562 self.candidates = [] |
| 563 if tasker is None: |
| 564 tasker = OutOfDateTask |
| 565 self.tasker = tasker |
| 566 if not order: |
| 567 order = lambda l: l |
| 568 self.order = order |
| 569 self.message = None |
| 570 self.trace = trace |
| 571 self.next_candidate = self.find_next_candidate |
| 572 self.pending_children = set() |
| 573 |
| 574 def find_next_candidate(self): |
| 575 """ |
| 576 Returns the next candidate Node for (potential) evaluation. |
| 577 |
| 578 The candidate list (really a stack) initially consists of all of |
| 579 the top-level (command line) targets provided when the Taskmaster |
| 580 was initialized. While we walk the DAG, visiting Nodes, all the |
| 581 children that haven't finished processing get pushed on to the |
| 582 candidate list. Each child can then be popped and examined in |
| 583 turn for whether *their* children are all up-to-date, in which |
| 584 case a Task will be created for their actual evaluation and |
| 585 potential building. |
| 586 |
| 587 Here is where we also allow candidate Nodes to alter the list of |
| 588 Nodes that should be examined. This is used, for example, when |
| 589 invoking SCons in a source directory. A source directory Node can |
| 590 return its corresponding build directory Node, essentially saying, |
| 591 "Hey, you really need to build this thing over here instead." |
| 592 """ |
| 593 try: |
| 594 return self.candidates.pop() |
| 595 except IndexError: |
| 596 pass |
| 597 try: |
| 598 node = self.top_targets_left.pop() |
| 599 except IndexError: |
| 600 return None |
| 601 self.current_top = node |
| 602 alt, message = node.alter_targets() |
| 603 if alt: |
| 604 self.message = message |
| 605 self.candidates.append(node) |
| 606 self.candidates.extend(self.order(alt)) |
| 607 node = self.candidates.pop() |
| 608 return node |
| 609 |
| 610 def no_next_candidate(self): |
| 611 """ |
| 612 Stops Taskmaster processing by not returning a next candidate. |
| 613 |
|  614         Note that we have to clean up the Taskmaster candidate list |
|  615         because cycle detection depends on the fact that all nodes |
|  616         have been processed somehow. |
| 617 """ |
| 618 while self.candidates: |
| 619 candidates = self.candidates |
| 620 self.candidates = [] |
| 621 self.will_not_build(candidates) |
| 622 return None |
| 623 |
| 624 def _validate_pending_children(self): |
| 625 """ |
| 626 Validate the content of the pending_children set. Assert if an |
| 627 internal error is found. |
| 628 |
| 629 This function is used strictly for debugging the taskmaster by |
| 630 checking that no invariants are violated. It is not used in |
| 631 normal operation. |
| 632 |
| 633 The pending_children set is used to detect cycles in the |
| 634 dependency graph. We call a "pending child" a child that is |
| 635 found in the "pending" state when checking the dependencies of |
| 636 its parent node. |
| 637 |
| 638 A pending child can occur when the Taskmaster completes a loop |
|  639         through a cycle. For example, let's imagine a graph made of |
|  640         three nodes (A, B and C) forming a cycle. The evaluation starts |
|  641         at node A. The Taskmaster first considers whether node A's |
|  642         child B is up-to-date. Then, recursively, node B needs to |
|  643         check whether node C is up-to-date. This leaves us with a |
| 644 dependency graph looking like: |
| 645 |
| 646 Next candidate \ |
| 647 \ |
| 648 Node A (Pending) --> Node B(Pending) --> Node C (NoState) |
| 649 ^ | |
| 650 | | |
| 651 +-------------------------------------+ |
| 652 |
|  653         Now, when the Taskmaster examines Node C's child Node A, |
| 654 it finds that Node A is in the "pending" state. Therefore, |
| 655 Node A is a pending child of node C. |
| 656 |
|  657         Pending children indicate that the Taskmaster has potentially |
|  658         looped back through a cycle. We say potentially because the |
|  659         same situation can also occur when a DAG is evaluated in |
|  660         parallel. For example, consider the following graph: |
| 661 |
| 662 |
| 663 Node A (Pending) --> Node B(Pending) --> Node C (Pending) --> ... |
| 664 | ^ |
| 665 | | |
| 666 +----------> Node D (NoState) --------+ |
| 667 / |
| 668 Next candidate / |
| 669 |
| 670 The Taskmaster first evaluates the nodes A, B, and C and |
|  671         starts building some children of node C. Assuming that the |
| 672 maximum parallel level has not been reached, the Taskmaster |
| 673 will examine Node D. It will find that Node C is a pending |
| 674 child of Node D. |
| 675 |
| 676 In summary, evaluating a graph with a cycle will always |
| 677 involve a pending child at one point. A pending child might |
| 678 indicate either a cycle or a diamond-shaped DAG. Only a |
|  679         fraction of the nodes ends up being a "pending child" of |
| 680 another node. This keeps the pending_children set small in |
| 681 practice. |
| 682 |
| 683 We can differentiate between the two cases if we wait until |
|  684         the end of the build. At this point, all the pending child |
|  685         nodes due to a diamond-shaped DAG will have been properly |
|  686         built (or will have failed to build). But the pending |
| 687 children involved in a cycle will still be in the pending |
| 688 state. |
| 689 |
|  690         The Taskmaster removes nodes from the pending_children set as |
|  691         soon as a pending child moves out of the pending |
| 692 state. This also helps to keep the pending_children set small. |
| 693 """ |
| 694 |
| 695 for n in self.pending_children: |
| 696 assert n.state in (NODE_PENDING, NODE_EXECUTING), \ |
| 697 (str(n), StateString[n.state]) |
| 698 assert len(n.waiting_parents) != 0, (str(n), len(n.waiting_parents)) |
| 699 for p in n.waiting_parents: |
| 700 assert p.ref_count > 0, (str(n), str(p), p.ref_count) |
| 701 |
| 702 |
| 703 def trace_message(self, message): |
| 704 return 'Taskmaster: %s\n' % message |
| 705 |
| 706 def trace_node(self, node): |
| 707 return '<%-10s %-3s %s>' % (StateString[node.get_state()], |
| 708 node.ref_count, |
| 709 repr(str(node))) |
| 710 |
| 711 def _find_next_ready_node(self): |
| 712 """ |
| 713 Finds the next node that is ready to be built. |
| 714 |
| 715 This is *the* main guts of the DAG walk. We loop through the |
| 716 list of candidates, looking for something that has no un-built |
| 717 children (i.e., that is a leaf Node or has dependencies that are |
| 718 all leaf Nodes or up-to-date). Candidate Nodes are re-scanned |
| 719 (both the target Node itself and its sources, which are always |
| 720 scanned in the context of a given target) to discover implicit |
| 721 dependencies. A Node that must wait for some children to be |
| 722 built will be put back on the candidates list after the children |
| 723 have finished building. A Node that has been put back on the |
| 724 candidates list in this way may have itself (or its sources) |
| 725 re-scanned, in order to handle generated header files (e.g.) and |
| 726 the implicit dependencies therein. |
| 727 |
| 728 Note that this method does not do any signature calculation or |
| 729 up-to-date check itself. All of that is handled by the Task |
| 730 class. This is purely concerned with the dependency graph walk. |
| 731 """ |
| 732 |
| 733 self.ready_exc = None |
| 734 |
| 735 T = self.trace |
|  736         if T: T.write(u'\n' + self.trace_message('Looking for a node to evaluate')) |
| 737 |
| 738 while True: |
| 739 node = self.next_candidate() |
| 740 if node is None: |
|  741                 if T: T.write(self.trace_message('No candidate anymore.') + u'\n') |
| 742 return None |
| 743 |
| 744 node = node.disambiguate() |
| 745 state = node.get_state() |
| 746 |
| 747 # For debugging only: |
| 748 # |
| 749 # try: |
| 750 # self._validate_pending_children() |
| 751 # except: |
| 752 # self.ready_exc = sys.exc_info() |
| 753 # return node |
| 754 |
| 755 if CollectStats: |
| 756 if not hasattr(node, 'stats'): |
| 757 node.stats = Stats() |
| 758 StatsNodes.append(node) |
| 759 S = node.stats |
| 760 S.considered = S.considered + 1 |
| 761 else: |
| 762 S = None |
| 763 |
|  764             if T: T.write(self.trace_message(u'    Considering node %s and its children:' % self.trace_node(node))) |
| 765 |
| 766 if state == NODE_NO_STATE: |
| 767 # Mark this node as being on the execution stack: |
| 768 node.set_state(NODE_PENDING) |
| 769 elif state > NODE_PENDING: |
| 770 # Skip this node if it has already been evaluated: |
| 771 if S: S.already_handled = S.already_handled + 1 |
|  772                 if T: T.write(self.trace_message(u'       already handled (executed)')) |
| 773 continue |
| 774 |
| 775 executor = node.get_executor() |
| 776 |
| 777 try: |
| 778 children = executor.get_all_children() |
| 779 except SystemExit: |
| 780 exc_value = sys.exc_info()[1] |
| 781 e = SCons.Errors.ExplicitExit(node, exc_value.code) |
| 782 self.ready_exc = (SCons.Errors.ExplicitExit, e) |
| 783 if T: T.write(self.trace_message(' SystemExit')) |
| 784 return node |
| 785 except Exception, e: |
| 786 # We had a problem just trying to figure out the |
| 787 # children (like a child couldn't be linked in to a |
| 788 # VariantDir, or a Scanner threw something). Arrange to |
| 789 # raise the exception when the Task is "executed." |
| 790 self.ready_exc = sys.exc_info() |
| 791 if S: S.problem = S.problem + 1 |
|  792                 if T: T.write(self.trace_message('       exception %s while scanning children.\n' % e)) |
| 793 return node |
| 794 |
| 795 children_not_visited = [] |
| 796 children_pending = set() |
| 797 children_not_ready = [] |
| 798 children_failed = False |
| 799 |
| 800 for child in chain(executor.get_all_prerequisites(), children): |
| 801 childstate = child.get_state() |
| 802 |
|  803                 if T: T.write(self.trace_message(u'       ' + self.trace_node(child))) |
| 804 |
| 805 if childstate == NODE_NO_STATE: |
| 806 children_not_visited.append(child) |
| 807 elif childstate == NODE_PENDING: |
| 808 children_pending.add(child) |
| 809 elif childstate == NODE_FAILED: |
| 810 children_failed = True |
| 811 |
| 812 if childstate <= NODE_EXECUTING: |
| 813 children_not_ready.append(child) |
| 814 |
| 815 |
| 816 # These nodes have not even been visited yet. Add |
| 817 # them to the list so that on some next pass we can |
| 818 # take a stab at evaluating them (or their children). |
| 819 children_not_visited.reverse() |
| 820 self.candidates.extend(self.order(children_not_visited)) |
| 821 #if T and children_not_visited: |
|  822             #    T.write(self.trace_message('    adding to candidates: %s' % map(str, children_not_visited))) |
|  823             #    T.write(self.trace_message('    candidates now: %s\n' % map(str, self.candidates))) |
| 824 |
| 825 # Skip this node if any of its children have failed. |
| 826 # |
| 827 # This catches the case where we're descending a top-level |
| 828 # target and one of our children failed while trying to be |
| 829 # built by a *previous* descent of an earlier top-level |
| 830 # target. |
| 831 # |
| 832 # It can also occur if a node is reused in multiple |
|  833             # targets. The first descent goes through one of the |
|  834             # targets; the next descent reaches it through the other target. |
| 835 # |
|  836             # Note that we can only have failed children if the |
|  837             # --keep-going flag was used, because without it the build |
|  838             # will stop before diving into the other branch. |
| 839 # |
| 840 # Note that even if one of the children fails, we still |
| 841 # added the other children to the list of candidate nodes |
| 842 # to keep on building (--keep-going). |
| 843 if children_failed: |
| 844 for n in executor.get_action_targets(): |
| 845 n.set_state(NODE_FAILED) |
| 846 |
| 847 if S: S.child_failed = S.child_failed + 1 |
|  848                 if T: T.write(self.trace_message('****** %s\n' % self.trace_node(node))) |
| 849 continue |
| 850 |
| 851 if children_not_ready: |
| 852 for child in children_not_ready: |
| 853 # We're waiting on one or more derived targets |
| 854 # that have not yet finished building. |
| 855 if S: S.not_built = S.not_built + 1 |
| 856 |
| 857 # Add this node to the waiting parents lists of |
| 858 # anything we're waiting on, with a reference |
| 859 # count so we can be put back on the list for |
| 860 # re-evaluation when they've all finished. |
|  861                     node.ref_count = node.ref_count + child.add_to_waiting_parents(node) |
|  862                     if T: T.write(self.trace_message(u'     adjusted ref count: %s, child %s' % |
| 863 (self.trace_node(node), repr(str(child))))) |
| 864 |
| 865 if T: |
| 866 for pc in children_pending: |
|  867                         T.write(self.trace_message('    adding %s to the pending children set\n' % |
| 868 self.trace_node(pc))) |
| 869 self.pending_children = self.pending_children | children_pending |
| 870 |
| 871 continue |
| 872 |
| 873 # Skip this node if it has side-effects that are |
| 874 # currently being built: |
| 875 wait_side_effects = False |
| 876 for se in executor.get_action_side_effects(): |
| 877 if se.get_state() == NODE_EXECUTING: |
| 878 se.add_to_waiting_s_e(node) |
| 879 wait_side_effects = True |
| 880 |
| 881 if wait_side_effects: |
| 882 if S: S.side_effects = S.side_effects + 1 |
| 883 continue |
| 884 |
| 885 # The default when we've gotten through all of the checks above: |
| 886 # this node is ready to be built. |
| 887 if S: S.build = S.build + 1 |
| 888 if T: T.write(self.trace_message(u'Evaluating %s\n' % |
| 889 self.trace_node(node))) |
| 890 |
| 891 # For debugging only: |
| 892 # |
| 893 # try: |
| 894 # self._validate_pending_children() |
| 895 # except: |
| 896 # self.ready_exc = sys.exc_info() |
| 897 # return node |
| 898 |
| 899 return node |
| 900 |
| 901 return None |
| 902 |
| 903 def next_task(self): |
| 904 """ |
| 905 Returns the next task to be executed. |
| 906 |
| 907 This simply asks for the next Node to be evaluated, and then wraps |
| 908 it in the specific Task subclass with which we were initialized. |
| 909 """ |
| 910 node = self._find_next_ready_node() |
| 911 |
| 912 if node is None: |
| 913 return None |
| 914 |
| 915 tlist = node.get_executor().get_all_targets() |
| 916 |
| 917 task = self.tasker(self, tlist, node in self.original_top, node) |
| 918 try: |
| 919 task.make_ready() |
| 920 except: |
| 921 # We had a problem just trying to get this task ready (like |
| 922 # a child couldn't be linked in to a VariantDir when deciding |
| 923 # whether this node is current). Arrange to raise the |
| 924 # exception when the Task is "executed." |
| 925 self.ready_exc = sys.exc_info() |
| 926 |
| 927 if self.ready_exc: |
| 928 task.exception_set(self.ready_exc) |
| 929 |
| 930 self.ready_exc = None |
| 931 |
| 932 return task |
| 933 |
| 934 def will_not_build(self, nodes, node_func=lambda n: None): |
| 935 """ |
|  936         Perform clean-up for nodes that will never be built. Invokes |
| 937 a user defined function on all of these nodes (including all |
| 938 of their parents). |
| 939 """ |
| 940 |
| 941 T = self.trace |
| 942 |
| 943 pending_children = self.pending_children |
| 944 |
| 945 to_visit = set(nodes) |
| 946 pending_children = pending_children - to_visit |
| 947 |
| 948 if T: |
| 949 for n in nodes: |
|  950                 T.write(self.trace_message('       removing node %s from the pending children set\n' % |
| 951 self.trace_node(n))) |
| 952 try: |
| 953 while len(to_visit): |
| 954 node = to_visit.pop() |
| 955 node_func(node) |
| 956 |
| 957 # Prune recursion by flushing the waiting children |
| 958 # list immediately. |
| 959 parents = node.waiting_parents |
| 960 node.waiting_parents = set() |
| 961 |
| 962 to_visit = to_visit | parents |
| 963 pending_children = pending_children - parents |
| 964 |
| 965 for p in parents: |
| 966 p.ref_count = p.ref_count - 1 |
|  967                     if T: T.write(self.trace_message('      removing parent %s from the pending children set\n' % |
| 968 self.trace_node(p))) |
| 969 except KeyError: |
| 970 # The container to_visit has been emptied. |
| 971 pass |
| 972 |
|  973         # We have to stick the pending_children set back into the |
|  974         # Taskmaster because Python 1.5.2 compatibility does not |
|  975         # allow us to use in-place updates. |
| 976 self.pending_children = pending_children |
| 977 |
| 978 def stop(self): |
| 979 """ |
| 980 Stops the current build completely. |
| 981 """ |
| 982 self.next_candidate = self.no_next_candidate |
| 983 |
| 984 def cleanup(self): |
| 985 """ |
| 986 Check for dependency cycles. |
| 987 """ |
| 988 if not self.pending_children: |
| 989 return |
| 990 |
| 991 nclist = [(n, find_cycle([n], set())) for n in self.pending_children] |
| 992 |
| 993 genuine_cycles = [ |
| 994 node for node,cycle in nclist |
| 995 if cycle or node.get_state() != NODE_EXECUTED |
| 996 ] |
| 997 if not genuine_cycles: |
| 998 # All of the "cycles" found were single nodes in EXECUTED state, |
| 999 # which is to say, they really weren't cycles. Just return. |
| 1000 return |
| 1001 |
| 1002 desc = 'Found dependency cycle(s):\n' |
| 1003 for node, cycle in nclist: |
| 1004 if cycle: |
| 1005 desc = desc + " " + " -> ".join(map(str, cycle)) + "\n" |
| 1006 else: |
| 1007 desc = desc + \ |
| 1008                     "  Internal Error: no cycle found for node %s (%s) in state %s\n" % \ |
| 1009 (node, repr(node), StateString[node.get_state()]) |
| 1010 |
| 1011 raise SCons.Errors.UserError(desc) |
| 1012 |
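|      # Illustrative sketch (the helper and the name-sorting order function |
|      # are assumptions for the example): 'tasker' and 'order' are the two |
|      # customization hooks the Taskmaster constructor exposes.  'order' is |
|      # handed each batch of candidate child nodes and may reorder it before |
|      # the batch is pushed onto the candidates stack. |
|      def _example_taskmaster(top_level_nodes): |
|          """Sketch: a Taskmaster that visits candidate children in name order.""" |
|          def by_name(dependencies): |
|              dependencies.sort(key=str) |
|              return dependencies |
|          return Taskmaster(targets=top_level_nodes, |
|                            tasker=AlwaysTask, |
|                            order=by_name) |
|      |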
| 1013 # Local Variables: |
| 1014 # tab-width:4 |
| 1015 # indent-tabs-mode:nil |
| 1016 # End: |
| 1017 # vim: set expandtab tabstop=4 shiftwidth=4: |