Chromium Code Reviews
chromiumcodereview-hr@appspot.gserviceaccount.com (chromiumcodereview-hr) | Please choose your nickname with Settings | Help | Chromium Project | Gerrit Changes | Sign out
(55)

Side by Side Diff: parallel_emerge

Issue 2891013: Integrate parallel_emerge with emerge, boosting performance. (Closed) Base URL: ssh://git@chromiumos-git/crosutils.git
Patch Set: Minor fixes Created 10 years, 5 months ago
Use n/p to move between diff chunks; N/P to move between comments. Draft comments are only viewable by you.
Jump to:
View unified diff | Download patch
« no previous file with comments | « no previous file | no next file » | no next file with comments »
Toggle Intra-line Diffs ('i') | Expand Comments ('e') | Collapse Comments ('c') | Show Comments Hide Comments ('s')
OLDNEW
1 #!/usr/bin/python2.6 1 #!/usr/bin/python2.6
2 # Copyright (c) 2010 The Chromium OS Authors. All rights reserved. 2 # Copyright (c) 2010 The Chromium OS Authors. All rights reserved.
3 # Use of this source code is governed by a BSD-style license that can be 3 # Use of this source code is governed by a BSD-style license that can be
4 # found in the LICENSE file. 4 # found in the LICENSE file.
5 5
6 """Program to run emerge in parallel, for significant speedup. 6 """Program to run emerge in parallel, for significant speedup.
7 7
8 Usage: 8 Usage:
9 ./parallel_emerge [--board=BOARD] [--workon=PKGS] [--no-workon-deps] 9 ./parallel_emerge [--board=BOARD] [--workon=PKGS] [--no-workon-deps]
10 [emerge args] package 10 [emerge args] package
(...skipping 20 matching lines...) Expand all
31 or "--nodeps" write only access. 31 or "--nodeps" write only access.
32 Caveats: 32 Caveats:
33 * Some ebuild packages have incorrectly specified deps, and running 33 * Some ebuild packages have incorrectly specified deps, and running
34 them in parallel is more likely to bring out these failures. 34 them in parallel is more likely to bring out these failures.
35 * Some ebuilds (especially the build part) have complex dependencies 35 * Some ebuilds (especially the build part) have complex dependencies
36 that are not captured well by this script (it may be necessary to 36 that are not captured well by this script (it may be necessary to
37 install an old package to build, but then install a newer version 37 install an old package to build, but then install a newer version
38 of the same package for a runtime dep). 38 of the same package for a runtime dep).
39 """ 39 """
40 40
41 import copy
42 import multiprocessing
41 import os 43 import os
42 import re 44 import Queue
43 import shlex 45 import shlex
44 import subprocess
45 import sys 46 import sys
46 import tempfile 47 import tempfile
47 import time 48 import time
48 import _emerge.main 49 import urllib2
50
51 # If PORTAGE_USERNAME isn't specified, scrape it from the $HOME variable. On
52 # Chromium OS, the default "portage" user doesn't have the necessary
53 # permissions. It'd be easier if we could default to $USERNAME, but $USERNAME
54 # is "root" here because we get called through sudo.
55 #
56 # We need to set this before importing any portage modules, because portage
57 # looks up "PORTAGE_USERNAME" at import time.
58 #
59 # NOTE: .bashrc sets PORTAGE_USERNAME = $USERNAME, so most people won't
60 # encounter this case unless they have an old chroot or blow away the
61 # environment by running sudo without the -E specifier.
62 if "PORTAGE_USERNAME" not in os.environ:
63 homedir = os.environ["HOME"]
64 if homedir.startswith("/home/"):
65 os.environ["PORTAGE_USERNAME"] = homedir.split("/")[2]
66
67 # Portage doesn't expose dependency trees in its public API, so we have to
68 # make use of some private APIs here. These modules are found under
69 # /usr/lib/portage/pym/.
70 #
71 # TODO(davidjames): Update Portage to expose public APIs for these features.
72 from _emerge.actions import adjust_configs
73 from _emerge.actions import load_emerge_config
74 from _emerge.create_depgraph_params import create_depgraph_params
75 from _emerge.depgraph import backtrack_depgraph
76 from _emerge.main import emerge_main
77 from _emerge.main import parse_opts
78 from _emerge.Package import Package
79 from _emerge.Scheduler import Scheduler
80 from _emerge.stdout_spinner import stdout_spinner
81 import portage
82 import portage.debug
49 83
50 84
51 def Usage(): 85 def Usage():
52 """Print usage.""" 86 """Print usage."""
53 print "Usage:" 87 print "Usage:"
54 print " ./parallel_emerge [--board=BOARD] [--workon=PKGS] [--no-workon-deps]" 88 print " ./parallel_emerge [--board=BOARD] [--workon=PKGS] [--no-workon-deps]"
55 print " [emerge args] package" 89 print " [--rebuild] [emerge args] package"
56 print 90 print
57 print "Packages specified as workon packages are always built from source." 91 print "Packages specified as workon packages are always built from source."
58 print "Unless --no-workon-deps is specified, packages that depend on these" 92 print "Unless --no-workon-deps is specified, packages that depend on these"
59 print "packages are also built from source." 93 print "packages are also built from source."
60 print 94 print
61 print "The --workon argument is mainly useful when you want to build and" 95 print "The --workon argument is mainly useful when you want to build and"
62 print "install packages that you are working on unconditionally, but do not" 96 print "install packages that you are working on unconditionally, but do not"
63 print "to have to rev the package to indicate you want to build it from" 97 print "to have to rev the package to indicate you want to build it from"
64 print "source. The build_packages script will automatically supply the" 98 print "source. The build_packages script will automatically supply the"
65 print "workon argument to emerge, ensuring that packages selected using" 99 print "workon argument to emerge, ensuring that packages selected using"
66 print "cros-workon are rebuilt." 100 print "cros-workon are rebuilt."
101 print
102 print "The --rebuild option rebuilds packages whenever their dependencies"
103 print "are changed. This ensures that your build is correct."
67 sys.exit(1) 104 sys.exit(1)
68 105
69 106
70 # These are dependencies that are not specified in the package, 107 # These are dependencies that are not specified in the package,
71 # but will prevent the package from installing. 108 # but will prevent the package from installing.
72 secret_deps = {} 109 secret_deps = {}
73 110
74 # Runtime flags. TODO(): Maybe make these command-line options or
75 # environment variables.
76 VERBOSE = False
77 AUTOCLEAN = False
78
79 # Global start time 111 # Global start time
80 GLOBAL_START = time.time() 112 GLOBAL_START = time.time()
81 113
82 114
83 def ParseArgs(argv): 115 class EmergeData(object):
84 """Set global vars based on command line. 116 """This simple struct holds various emerge variables.
85 117
86 We need to be compatible with emerge arg format. 118 This struct helps us easily pass emerge variables around as a unit.
87 We scrape arguments that are specific to parallel_emerge, and pass through 119 These variables are used for calculating dependencies and installing
88 the rest directly to emerge. 120 packages.
89 Args:
90 argv: arguments list
91 Returns:
92 triplet of (package list, emerge argumens, board string)
93 """ 121 """
94 if VERBOSE: 122
95 print argv 123 __slots__ = ["action", "cmdline_packages", "depgraph", "mtimedb", "opts",
96 workon_set = set() 124 "root_config", "scheduler_graph", "settings", "spinner",
97 myopts = {} 125 "trees"]
98 myopts["workon"] = workon_set 126
99 emerge_args = [] 127 def __init__(self):
100 for arg in argv[1:]: 128 # The action the user requested. If the user is installing packages, this
101 # Specifically match arguments that are specific to parallel_emerge, and 129 # is None. If the user is doing anything other than installing packages,
102 # pass through the rest. 130 # this will contain the action name, which will map exactly to the
103 if arg.startswith("--board="): 131 # long-form name of the associated emerge option.
104 myopts["board"] = arg.replace("--board=", "") 132 #
105 elif arg.startswith("--workon="): 133 # Example: If you call parallel_emerge --unmerge package, the action name
106 workon_str = arg.replace("--workon=", "") 134 # will be "unmerge"
107 workon_set.update(shlex.split(" ".join(shlex.split(workon_str)))) 135 self.action = None
108 elif arg == "--no-workon-deps": 136
109 myopts["no-workon-deps"] = True 137 # The list of packages the user passed on the command-line.
138 self.cmdline_packages = None
139
140 # The emerge dependency graph. It'll contain all the packages involved in
141 # this merge, along with their versions.
142 self.depgraph = None
143
144 # A dict of the options passed to emerge. This dict has been cleaned up
145 # a bit by parse_opts, so that it's a bit easier for the emerge code to
146 # look at the options.
147 #
148 # Emerge takes a few shortcuts in its cleanup process to make parsing of
149 # the options dict easier. For example, if you pass in "--usepkg=n", the
150 # "--usepkg" flag is just left out of the dictionary altogether. Because
151 # --usepkg=n is the default, this makes parsing easier, because emerge
152 # can just assume that if "--usepkg" is in the dictionary, it's enabled.
153 #
154 # These cleanup processes aren't applied to all options. For example, the
155 # --with-bdeps flag is passed in as-is. For a full list of the cleanups
156 # applied by emerge, see the parse_opts function in the _emerge.main
157 # package.
158 self.opts = None
159
160 # A dictionary used by portage to maintain global state. This state is
161 # loaded from disk when portage starts up, and saved to disk whenever we
162 # call mtimedb.commit().
163 #
164 # This database contains information about global updates (i.e., what
165 # version of portage we have) and what we're currently doing. Portage
166 # saves what it is currently doing in this database so that it can be
167 # resumed when you call it with the --resume option.
168 #
169 # parallel_emerge does not save what it is currently doing in the mtimedb,
170 # so we do not support the --resume option.
171 self.mtimedb = None
172
173 # The portage configuration for our current root. This contains the portage
174 # settings (see below) and the three portage trees for our current root.
175 # (The three portage trees are explained below, in the documentation for
176 # the "trees" member.)
177 self.root_config = None
178
179 # The scheduler graph is used by emerge to calculate what packages to
180 # install. We don't actually install any deps, so this isn't really used,
181 # but we pass it in to the Scheduler object anyway.
182 self.scheduler_graph = None
183
184 # Portage settings for our current session. Most of these settings are set
185 # in make.conf inside our current install root.
186 self.settings = None
187
188 # The spinner, which spews stuff to stdout to indicate that portage is
189 # doing something. We maintain our own spinner, so we set the portage
190 # spinner to "silent" mode.
191 self.spinner = None
192
193 # The portage trees. There are separate portage trees for each root. To get
194 # the portage tree for the current root, you can look in self.trees[root],
195 # where root = self.settings["ROOT"].
196 #
197 # In each root, there are three trees: vartree, porttree, and bintree.
198 # - vartree: A database of the currently-installed packages.
199 # - porttree: A database of ebuilds, that can be used to build packages.
200 # - bintree: A database of binary packages.
201 self.trees = None
202
203
204 class DepGraphGenerator(object):
205 """Grab dependency information about packages from portage.
206
207 Typical usage:
208 deps = DepGraphGenerator()
209 deps.Initialize(sys.argv[1:])
210 deps_tree, deps_info = deps.GenDependencyTree()
211 deps_graph = deps.GenDependencyGraph(deps_tree, deps_info)
212 deps.PrintTree(deps_tree)
213 PrintDepsMap(deps_graph)
214 """
215
216 __slots__ = ["board", "emerge", "mandatory_source", "no_workon_deps",
217 "package_db", "rebuild"]
218
219 def __init__(self):
220 self.board = None
221 self.emerge = EmergeData()
222 self.mandatory_source = set()
223 self.no_workon_deps = False
224 self.package_db = {}
225 self.rebuild = False
226
227 def ParseParallelEmergeArgs(self, argv):
228 """Read the parallel emerge arguments from the command-line.
229
230 We need to be compatible with emerge arg format. We scrape arguments that
231 are specific to parallel_emerge, and pass through the rest directly to
232 emerge.
233 Args:
234 argv: arguments list
235 Returns:
236 Arguments that don't belong to parallel_emerge
237 """
238 emerge_args = []
239 for arg in argv:
240 # Specifically match arguments that are specific to parallel_emerge, and
241 # pass through the rest.
242 if arg.startswith("--board="):
243 self.board = arg.replace("--board=", "")
244 elif arg.startswith("--workon="):
245 workon_str = arg.replace("--workon=", "")
246 package_list = shlex.split(" ".join(shlex.split(workon_str)))
247 self.mandatory_source.update(package_list)
248 elif arg == "--no-workon-deps":
249 self.no_workon_deps = True
250 elif arg == "--rebuild":
251 self.rebuild = True
252 else:
253 # Not one of our options, so pass through to emerge.
254 emerge_args.append(arg)
255
256 if self.rebuild:
257 if self.no_workon_deps:
258 print "--rebuild is not compatible with --no-workon-deps"
259 sys.exit(1)
260
261 return emerge_args
262
263 def Initialize(self, args):
264 """Initializer. Parses arguments and sets up portage state."""
265
266 # Parse and strip out args that are just intended for parallel_emerge.
267 emerge_args = self.ParseParallelEmergeArgs(args)
268
269 # Setup various environment variables based on our current board. These
270 # variables are normally setup inside emerge-${BOARD}, but since we don't
271 # call that script, we have to set it up here. These variables serve to
272 # point our tools at /build/BOARD and to setup cross compiles to the
273 # appropriate board as configured in toolchain.conf.
274 if self.board:
275 os.environ["PORTAGE_CONFIGROOT"] = "/build/" + self.board
276 os.environ["PORTAGE_SYSROOT"] = "/build/" + self.board
277 os.environ["SYSROOT"] = "/build/" + self.board
278 scripts_dir = os.path.dirname(os.path.realpath(__file__))
279 toolchain_path = "%s/../overlays/overlay-%s/toolchain.conf"
280 f = open(toolchain_path % (scripts_dir, self.board))
281 os.environ["CHOST"] = f.readline().strip()
282 f.close()
283
284 # Although CHROMEOS_ROOT isn't specific to boards, it's normally setup
285 # inside emerge-${BOARD}, so we set it up here for compatibility. It
286 # will be going away soon as we migrate to CROS_WORKON_SRCROOT.
287 os.environ.setdefault("CHROMEOS_ROOT", os.environ["HOME"] + "/trunk")
288
289 # Modify the environment to disable locking.
290 os.environ["PORTAGE_LOCKS"] = "false"
291 os.environ["UNMERGE_DELAY"] = "0"
292
293 # Parse the emerge options.
294 action, opts, cmdline_packages = parse_opts(emerge_args)
295
296 # If we're installing to the board, we want the --root-deps option so that
297 # portage will install the build dependencies to that location as well.
298 if self.board:
299 opts.setdefault("--root-deps", True)
300
301 # Set environment variables based on options. Portage normally sets these
302 # environment variables in emerge_main, but we can't use that function,
303 # because it also does a bunch of other stuff that we don't want.
304 # TODO(davidjames): Patch portage to move this logic into a function we can
305 # reuse here.
306 if "--debug" in opts:
307 os.environ["PORTAGE_DEBUG"] = "1"
308 if "--config-root" in opts:
309 os.environ["PORTAGE_CONFIGROOT"] = opts["--config-root"]
310 if "--root" in opts:
311 os.environ["ROOT"] = opts["--root"]
312 if "--accept-properties" in opts:
313 os.environ["ACCEPT_PROPERTIES"] = opts["--accept-properties"]
314
315 # Now that we've setup the necessary environment variables, we can load the
316 # emerge config from disk.
317 settings, trees, mtimedb = load_emerge_config()
318
319 # Check whether our portage tree is out of date. Typically, this happens
320 # when you're setting up a new portage tree, such as in setup_board and
321 # make_chroot. In that case, portage applies a bunch of global updates
322 # here. Once the updates are finished, we need to commit any changes
323 # that the global update made to our mtimedb, and reload the config.
324 #
325 # Portage normally handles this logic in emerge_main, but again, we can't
326 # use that function here.
327 if portage._global_updates(trees, mtimedb["updates"]):
328 mtimedb.commit()
329 settings, trees, mtimedb = load_emerge_config(trees=trees)
330
331 # Setup implied options. Portage normally handles this logic in
332 # emerge_main.
333 if "--buildpkgonly" in opts or "buildpkg" in settings.features:
334 opts.setdefault("--buildpkg", True)
335 if "--getbinpkgonly" in opts:
336 opts.setdefault("--usepkgonly", True)
337 opts.setdefault("--getbinpkg", True)
338 if "getbinpkg" in settings.features:
339 # Per emerge_main, FEATURES=getbinpkg overrides --getbinpkg=n
340 opts["--getbinpkg"] = True
341 if "--getbinpkg" in opts or "--usepkgonly" in opts:
342 opts.setdefault("--usepkg", True)
343 if "--fetch-all-uri" in opts:
344 opts.setdefault("--fetchonly", True)
345 if "--skipfirst" in opts:
346 opts.setdefault("--resume", True)
347 if "--buildpkgonly" in opts:
348 # --buildpkgonly will not merge anything, so it overrides all binary
349 # package options.
350 for opt in ("--getbinpkg", "--getbinpkgonly",
351 "--usepkg", "--usepkgonly"):
352 opts.pop(opt, None)
353 if (settings.get("PORTAGE_DEBUG", "") == "1" and
354 "python-trace" in settings.features):
355 portage.debug.set_trace(True)
356
357 # Complain about unsupported options
358 for opt in ("--ask", "--ask-enter-invalid", "--complete-graph",
359 "--resume", "--skipfirst"):
360 if opt in opts:
361 print "%s is not supported by parallel_emerge" % opt
362 sys.exit(1)
363
364 # Make emerge specific adjustments to the config (e.g. colors!)
365 adjust_configs(opts, trees)
366
367 # Save our configuration so far in the emerge object
368 emerge = self.emerge
369 emerge.action, emerge.opts = action, opts
370 emerge.settings, emerge.trees, emerge.mtimedb = settings, trees, mtimedb
371 emerge.cmdline_packages = cmdline_packages
372 root = settings["ROOT"]
373 emerge.root_config = trees[root]["root_config"]
374
375 def GenDependencyTree(self):
376 """Get dependency tree info from emerge.
377
378 TODO(): Update cros_extract_deps to also use this code.
379 Returns:
380 Dependency tree
381 """
382 start = time.time()
383
384 # Setup emerge options.
385 #
386 # We treat dependency info a bit differently than emerge itself. Unless
387 # you're using --usepkgonly, we disable --getbinpkg and --usepkg here so
388 # that emerge will look at the dependencies of the source ebuilds rather
389 # than the binary dependencies. This helps ensure that we have the option
390 # of merging a package from source, if we want to switch to it with
391 # --workon and the dependencies have changed.
392 emerge = self.emerge
393 emerge_opts = emerge.opts.copy()
394 emerge_opts.pop("--getbinpkg", None)
395 if "--usepkgonly" not in emerge_opts:
396 emerge_opts.pop("--usepkg", None)
397 if self.mandatory_source or self.rebuild:
398 # Enable --emptytree so that we get the full tree, which we need for
399 # dependency analysis. By default, with this option, emerge optimizes
400 # the graph by removing uninstall instructions from the graph. By
401 # specifying --tree as well, we tell emerge that it's not safe to remove
402 # uninstall instructions because we're planning on analyzing the output.
403 emerge_opts["--tree"] = True
404 emerge_opts["--emptytree"] = True
405
406 # Create a list of packages to merge
407 packages = set(emerge.cmdline_packages[:])
408 if self.mandatory_source:
409 packages.update(self.mandatory_source)
410
411 # Tell emerge to be quiet. We print plenty of info ourselves so we don't
412 # need any extra output from portage.
413 portage.util.noiselimit = -1
414
415 # My favorite feature: The silent spinner. It doesn't spin. Ever.
416 # I'd disable the colors by default too, but they look kind of cool.
417 emerge.spinner = stdout_spinner()
418 emerge.spinner.update = emerge.spinner.update_quiet
419
420 if "--quiet" not in emerge.opts:
421 print "Calculating deps..."
422
423 # Ask portage to build a dependency graph with the options we specified
424 # above.
425 params = create_depgraph_params(emerge_opts, emerge.action)
426 success, depgraph, _ = backtrack_depgraph(
427 emerge.settings, emerge.trees, emerge_opts, params, emerge.action,
428 packages, emerge.spinner)
429 emerge.depgraph = depgraph
430
431 # Is it impossible to honor the user's request? Bail!
432 if not success:
433 depgraph.display_problems()
434 sys.exit(1)
435
436 # Build our own tree from the emerge digraph.
437 deps_tree = {}
438 digraph = depgraph._dynamic_config.digraph
439 for node, node_deps in digraph.nodes.items():
440 # Calculate dependency packages that need to be installed first. Each
441 # child on the digraph is a dependency. The "operation" field specifies
442 # what we're doing (e.g. merge, uninstall, etc.). The "priorities" array
443 # contains the type of dependency (e.g. build, runtime, runtime_post,
444 # etc.)
445 #
446 # Emerge itself actually treats some dependencies as "soft" dependencies
447 # and sometimes ignores them. We don't do that -- we honor all
448 # dependencies unless we're forced to prune them because they're cyclic.
449 #
450 # Portage refers to the identifiers for packages as a CPV. This acronym
451 # stands for Category/Package/Version.
452 #
453 # Here's an example CPV: chromeos-base/power_manager-0.0.1-r1
454 # Split up, this CPV would be:
455 # C -- Category: chromeos-base
456 # P -- Package: power_manager
457 # V -- Version: 0.0.1-r1
458 #
459 # We just refer to CPVs as packages here because it's easier.
460 deps = {}
461 for child, priorities in node_deps[0].items():
462 deps[str(child.cpv)] = dict(action=str(child.operation),
463 deptype=str(priorities[-1]),
464 deps={})
465
466 # We've built our list of deps, so we can add our package to the tree.
467 if isinstance(node, Package):
468 deps_tree[str(node.cpv)] = dict(action=str(node.operation),
469 deps=deps)
470
471 emptytree = "--emptytree" in emerge.opts
472
473 # Ask portage for its install plan, so that we can only throw out
474 # dependencies that portage throws out. Also, keep track of the old
475 # versions of packages that we're either upgrading or replacing.
476 #
477 # The "vardb" is the database of installed packages.
478 vardb = emerge.trees[emerge.settings["ROOT"]]["vartree"].dbapi
479 deps_info = {}
480 for pkg in depgraph.altlist():
481 if isinstance(pkg, Package):
482 # If we're not in emptytree mode, and we're going to replace a package
483 # that is already installed, then this operation is possibly optional.
484 # ("--selective" mode is handled later, in RemoveInstalledPackages())
485 optional = False
486 if not emptytree and vardb.cpv_exists(pkg.cpv):
487 optional = True
488
489 # Add the package to our database.
490 self.package_db[str(pkg.cpv)] = pkg
491
492 # Save off info about the package
493 deps_info[str(pkg.cpv)] = {"idx": len(deps_info),
494 "optional": optional}
495
496 # Delete the --tree option, because we don't really want to display a
497 # tree. We just wanted to get emerge to leave uninstall instructions on
498 # the graph. Later, when we display the graph, we'll want standard-looking
499 # output, so removing the --tree option is important.
500 depgraph._frozen_config.myopts.pop("--tree", None)
501
502 seconds = time.time() - start
503 if "--quiet" not in emerge.opts:
504 print "Deps calculated in %dm%.1fs" % (seconds / 60, seconds % 60)
505
506 return deps_tree, deps_info
507
508 def PrintTree(self, deps, depth=""):
509 """Print the deps we have seen in the emerge output.
510
511 Args:
512 deps: Dependency tree structure.
513 depth: Allows printing the tree recursively, with indentation.
514 """
515 for entry in sorted(deps):
516 action = deps[entry]["action"]
517 print "%s %s (%s)" % (depth, entry, action)
518 self.PrintTree(deps[entry]["deps"], depth=depth + " ")
519
520 def GenDependencyGraph(self, deps_tree, deps_info):
521 """Generate a doubly linked dependency graph.
522
523 Args:
524 deps_tree: Dependency tree structure.
525 deps_info: More details on the dependencies.
526 Returns:
527 Deps graph in the form of a dict of packages, with each package
528 specifying a "needs" list and "provides" list.
529 """
530 emerge = self.emerge
531 root = emerge.settings["ROOT"]
532
533 # It's useful to know what packages will actually end up on the
534 # system at some point. Packages in final_db are either already
535 # installed, or will be installed by the time we're done.
536 final_db = emerge.depgraph._dynamic_config.mydbapi[root]
537
538 # final_pkgs is a set of the packages we found in the final_db. These
539 # packages are either already installed, or will be installed by the time
540 # we're done. It's populated in BuildFinalPackageSet()
541 final_pkgs = set()
542
543 # deps_map is the actual dependency graph.
544 #
545 # Each package specifies a "needs" list and a "provides" list. The "needs"
546 # list indicates which packages we depend on. The "provides" list
547 # indicates the reverse dependencies -- what packages need us.
548 #
549 # We also provide some other information in the dependency graph:
550 # - action: What we're planning on doing with this package. Generally,
551 # "merge", "nomerge", or "uninstall"
552 # - mandatory_source:
553 # If true, indicates that this package must be compiled from source.
554 # We set this for "workon" packages, and for packages where the
555 # binaries are known to be out of date.
556 # - mandatory:
557 # If true, indicates that this package must be installed. We don't care
558 # whether it's binary or source, unless the mandatory_source flag is
559 # also set.
560 #
561 deps_map = {}
562
563 def ReverseTree(packages):
564 """Convert tree to digraph.
565
566 Take the tree of package -> requirements and reverse it to a digraph of
567 buildable packages -> packages they unblock.
568 Args:
569 packages: Tree(s) of dependencies.
570 Returns:
571 Unsanitized digraph.
572 """
573 for pkg in packages:
574
575 # Create an entry for the package
576 action = packages[pkg]["action"]
577 default_pkg = {"needs": {}, "provides": set(), "action": action,
578 "mandatory_source": False, "mandatory": False}
579 this_pkg = deps_map.setdefault(pkg, default_pkg)
580
581 # Create entries for dependencies of this package first.
582 ReverseTree(packages[pkg]["deps"])
583
584 # Add dependencies to this package.
585 for dep, dep_item in packages[pkg]["deps"].iteritems():
586 dep_pkg = deps_map[dep]
587 dep_type = dep_item["deptype"]
588 if dep_type != "runtime_post":
589 dep_pkg["provides"].add(pkg)
590 this_pkg["needs"][dep] = dep_type
591
592 def BuildFinalPackageSet():
593 # If this package is installed, or will get installed, add it to
594 # final_pkgs
595 for pkg in deps_map:
596 for match in final_db.match_pkgs(pkg):
597 final_pkgs.add(str(match.cpv))
598
599 def FindCycles():
600 """Find cycles in the dependency tree.
601
602 Returns:
603 Dict of packages involved in cyclic dependencies, mapping each package
604 to a list of the cycles the package is involved in.
605 """
606
607 def FindCyclesAtNode(pkg, cycles, unresolved, resolved):
608 """Find cycles in cyclic dependencies starting at specified package.
609
610 Args:
611 pkg: Package identifier.
612 cycles: Set of cycles so far.
613 unresolved: Nodes that have been visited but are not fully processed.
614 resolved: Nodes that have been visited and are fully processed.
615 Returns:
616 Whether a cycle was found.
617 """
618 if pkg in resolved:
619 return
620 unresolved.append(pkg)
621 for dep in deps_map[pkg]["needs"]:
622 if dep in unresolved:
623 idx = unresolved.index(dep)
624 mycycle = unresolved[idx:] + [dep]
625 for cycle_pkg in mycycle:
626 info = cycles.setdefault(cycle_pkg, {})
627 info.setdefault("pkgs", set()).update(mycycle)
628 info.setdefault("cycles", []).append(mycycle)
629 else:
630 FindCyclesAtNode(dep, cycles, unresolved, resolved)
631 unresolved.pop()
632 resolved.add(pkg)
633
634 cycles, unresolved, resolved = {}, [], set()
635 for pkg in deps_map:
636 FindCyclesAtNode(pkg, cycles, unresolved, resolved)
637 return cycles
638
639 def RemoveInstalledPackages():
640 """Remove installed packages, propagating dependencies."""
641
642 # If we're not in selective mode, the packages on the command line are
643 # not optional.
644 if "--selective" in emerge.opts:
645 selective = emerge.opts["--selective"] != "n"
646 else:
647 selective = "--noreplace" in emerge.opts or "--update" in emerge.opts
648 if not selective:
649 for pkg in emerge.cmdline_packages:
650 for db_pkg in final_db.match_pkgs(pkg):
651 deps_info[db_pkg.cpv]["optional"] = False
652
653 # Schedule packages that aren't on the install list for removal
654 rm_pkgs = set(deps_map.keys()) - set(deps_info.keys())
655
656 # Schedule optional packages for removal
657 for pkg, info in deps_info.items():
658 if info["optional"]:
659 rm_pkgs.add(pkg)
660
661 # Remove the packages we don't want, simplifying the graph and making
662 # it easier for us to crack cycles.
663 for pkg in sorted(rm_pkgs):
664 this_pkg = deps_map[pkg]
665 needs = this_pkg["needs"]
666 provides = this_pkg["provides"]
667 for dep in needs:
668 dep_provides = deps_map[dep]["provides"]
669 dep_provides.update(provides)
670 dep_provides.discard(pkg)
671 dep_provides.discard(dep)
672 for target in provides:
673 target_needs = deps_map[target]["needs"]
674 target_needs.update(needs)
675 target_needs.pop(pkg, None)
676 target_needs.pop(target, None)
677 del deps_map[pkg]
678
679 def SanitizeTree(cycles):
680 """Remove circular dependencies.
681
682 We only prune circular dependencies that go against the emerge ordering.
683 This has a nice property: we're guaranteed to merge dependencies in the
684 same order that portage does.
685
686 Because we don't treat any dependencies as "soft" unless they're killed
687 by a cycle, we pay attention to a larger number of dependencies when
688 merging. This hurts performance a bit, but helps reliability.
689
690 Args:
691 cycles: Dict of packages involved in cyclic dependencies, mapping each
692 package to a list of the cycles the package is involved in. Produced
693 by FindCycles().
694 """
695 for basedep in set(cycles).intersection(deps_map):
696 this_pkg = deps_map[basedep]
697 for dep in this_pkg["provides"].intersection(cycles[basedep]["pkgs"]):
698 if deps_info[basedep]["idx"] >= deps_info[dep]["idx"]:
699 for mycycle in cycles[basedep]["cycles"]:
700 if dep in mycycle:
701 print "Breaking %s -> %s in cycle:" % (dep, basedep)
702 for i in range(len(mycycle) - 1):
703 needs = deps_map[mycycle[i]]["needs"]
704 deptype = needs.get(mycycle[i+1], "deleted")
705 print " %s -> %s (%s)" % (mycycle[i], mycycle[i+1], deptype)
706 del deps_map[dep]["needs"][basedep]
707 this_pkg["provides"].remove(dep)
708 break
709
710 def AddSecretDeps():
711 """Find these tagged packages and add extra dependencies.
712
713 For debugging dependency problems.
714 """
715 for bad in secret_deps:
716 needed = secret_deps[bad]
717 bad_pkg = None
718 needed_pkg = None
719 for dep in deps_map:
720 if dep.find(bad) != -1:
721 bad_pkg = dep
722 if dep.find(needed) != -1:
723 needed_pkg = dep
724 if bad_pkg and needed_pkg:
725 deps_map[needed_pkg]["provides"].add(bad_pkg)
726 deps_map[bad_pkg]["needs"][needed_pkg] = "secret"
727
def MergeChildren(pkg, merge_type):
  """Flag pkg, and recursively everything it provides, with merge_type.

  Stops at packages that are already flagged or are not in the final
  install set. Also clears the "optional" bit and promotes "nomerge"
  actions to "merge".
  """
  entry = deps_map[pkg]
  # Guard: already flagged, or not slated for install -- nothing to do.
  if entry[merge_type] or pkg not in final_pkgs:
    return set()

  deps_info[pkg]["optional"] = False  # This package is now required.
  entry[merge_type] = True
  for child in entry["provides"]:
    MergeChildren(child, merge_type)

  if entry["action"] == "nomerge":
    entry["action"] = "merge"
743
def RemotePackageDatabase():
  """Grab the latest binary package database from the prebuilt server.

  We need to know the modification times of the prebuilt packages so that we
  know when it is OK to use these packages and when we should rebuild them
  instead.

  Returns:
    A dict mapping package identifiers to modification times.
  """
  url = self.emerge.settings["PORTAGE_BINHOST"] + "/Packages"

  prebuilt_pkgs = {}
  pkg = None
  f = urllib2.urlopen(url)
  try:
    for line in f:
      if line.startswith("CPV: "):
        pkg = line.replace("CPV: ", "").rstrip()
      elif line.startswith("MTIME: ") and pkg is not None:
        # Each MTIME entry belongs to the most recent CPV line; a stray
        # MTIME before any CPV previously raised NameError and is now
        # ignored.
        prebuilt_pkgs[pkg] = int(line[:-1].replace("MTIME: ", ""))
  finally:
    # Always release the connection, even if parsing fails part-way.
    f.close()

  return prebuilt_pkgs
766
def LocalPackageDatabase():
  """Get the modification times of the packages in the local database.

  We need to know the modification times of the local packages so that we
  know when they need to be rebuilt.

  Returns:
    A dict mapping package identifiers to modification times.
  """
  if self.board:
    path = "/build/%s/packages/Packages" % self.board
  else:
    path = "/var/lib/portage/pkgs/Packages"
  local_pkgs = {}
  pkg = None
  # Context manager (supported in Python 2.6) replaces the old bare
  # file(path) call, which leaked the handle on a parse error.
  with open(path) as pkg_index:
    for line in pkg_index:
      if line.startswith("CPV: "):
        pkg = line.replace("CPV: ", "").rstrip()
      elif line.startswith("MTIME: ") and pkg is not None:
        # MTIME entries pair with the preceding CPV line; ignore a stray
        # MTIME that arrives before any CPV (previously a NameError).
        local_pkgs[pkg] = int(line[:-1].replace("MTIME: ", ""))
  return local_pkgs
788
789 def AutoRebuildDeps(local_pkgs, remote_pkgs, cycles):
790 """Recursively rebuild packages when necessary using modification times.
791
792 If you've modified a package, it's a good idea to rebuild all the packages
793 that depend on it from source. This function looks for any packages which
794 depend on packages that have been modified and ensures that they get
795 rebuilt.
796
797 Args:
798 local_pkgs: Modification times from the local database.
799 remote_pkgs: Modification times from the prebuilt server.
800 cycles: Dictionary returned from FindCycles()
801
802 Returns:
803 None. Packages are marked for merge via MergeChildren() side effects.
804 """
805
806 def PrebuiltsReady(pkg, pkg_db, cache):
807 """Check whether the prebuilts are ready for pkg and all deps.
808
809 Args:
810 pkg: The specified package.
811 pkg_db: The package DB to use.
812 cache: A dict, where the results are stored.
813
814 Returns:
815 True iff the prebuilts are ready for pkg and all deps.
816 """
817 if pkg in cache:
818 return cache[pkg]
819 if pkg not in pkg_db:
820 cache[pkg] = False
821 else:
# Dependency edges come from the enclosing scope's deps_map.
822 for dep in deps_map[pkg]["needs"]:
823 if not PrebuiltsReady(dep, pkg_db, cache):
824 cache[pkg] = False
825 break
# setdefault: if nothing above marked this package False, it is ready.
826 return cache.setdefault(pkg, True)
827
828 def LastModifiedWithDeps(pkg, pkg_db, cache):
829 """Calculate the last modified time of a package and its dependencies.
830
831 This function looks at all the packages needed by the specified package
832 and checks the most recent modification time of all of those packages.
833 If the dependencies of a package were modified more recently than the
834 package itself, then we know the package needs to be rebuilt.
835
836 Args:
837 pkg: The specified package.
838 pkg_db: The package DB to use.
839 cache: A dict, where the last modified times are stored.
840
841 Returns:
842 The last modified time of the specified package and its dependencies.
843 """
844 if pkg in cache:
845 return cache[pkg]
846
# A package absent from pkg_db counts as mtime 0 (i.e. "never built").
847 cache[pkg] = pkg_db.get(pkg, 0)
848 for dep in deps_map[pkg]["needs"]:
849 t = LastModifiedWithDeps(dep, pkg_db, cache)
850 cache[pkg] = max(cache[pkg], t)
851 return cache[pkg]
852
853 # For every package that's getting updated in our local cache (binary
854 # or source), make sure we also update the children. If a package is
855 # built from source, all children must also be built from source.
856 local_ready_cache, remote_ready_cache = {}, {}
857 local_mtime_cache, remote_mtime_cache = {}, {}
858 for pkg in final_pkgs:
859 # If all the necessary local packages are ready, and their
860 # modification times are in sync, we don't need to do anything here.
861 local_mtime = LastModifiedWithDeps(pkg, local_pkgs, local_mtime_cache)
862 local_ready = PrebuiltsReady(pkg, local_pkgs, local_ready_cache)
# NOTE(review): "and" binds tighter than "or", so this condition parses as
# (not local_ready) OR (mtime-stale AND pkg not in cycles). Confirm the
# cycle exemption was not also meant to cover the not-ready case.
863 if (not local_ready or local_pkgs.get(pkg, 0) < local_mtime and
864 pkg not in cycles):
865 # OK, at least one package is missing from the local cache or is
866 # outdated. This means we're going to have to install the package
867 # and all dependencies.
868 #
869 # If all the necessary remote packages are ready, and they're at
870 # least as new as our local packages, we can install them.
871 # Otherwise, we need to build from source.
872 remote_mtime = LastModifiedWithDeps(pkg, remote_pkgs,
873 remote_mtime_cache)
874 remote_ready = PrebuiltsReady(pkg, remote_pkgs, remote_ready_cache)
875 if remote_ready and (local_mtime <= remote_mtime or pkg in cycles):
876 MergeChildren(pkg, "mandatory")
877 else:
878 MergeChildren(pkg, "mandatory_source")
879
880 def UsePrebuiltPackages():
881 """Update packages that can use prebuilts to do so."""
882 start = time.time()
883
884 # The bintree is the database of binary packages. By default, it's
885 # empty.
# 'emerge', 'root', 'deps_map' and 'self' come from the enclosing scope.
886 bintree = emerge.trees[root]["bintree"]
887 bindb = bintree.dbapi
888 root_config = emerge.root_config
889 prebuilt_pkgs = {}
890
891 # Populate the DB with packages
892 bintree.populate("--getbinpkg" in emerge.opts,
893 "--getbinpkgonly" in emerge.opts)
894
895 # Update packages that can use prebuilts to do so.
896 for pkg, info in deps_map.iteritems():
897 if info and not info["mandatory_source"] and info["action"] == "merge":
# _aux_cache_keys is a private portage attribute -- presumably the set of
# metadata keys cached for binary packages; verify against the portage
# version in use.
898 db_keys = list(bindb._aux_cache_keys)
899 try:
900 db_vals = bindb.aux_get(pkg, db_keys + ["MTIME"])
901 except KeyError:
902 # No binary package
903 continue
# MTIME was requested last, so pop() strips it from the value list.
904 mtime = int(db_vals.pop() or 0)
# metadata is a list of (key, value) pairs -- presumably Package accepts
# an iterable of pairs here; confirm against the portage Package API.
905 metadata = zip(db_keys, db_vals)
906 db_pkg = Package(built=True, cpv=pkg, installed=False,
907 metadata=metadata, onlydeps=False, mtime=mtime,
908 operation="merge", root_config=root_config,
909 type_name="binary")
910 self.package_db[pkg] = db_pkg
911
912 seconds = time.time() - start
913 if "--quiet" not in emerge.opts:
914 print "Prebuilt DB populated in %dm%.1fs" % (seconds / 60, seconds % 60)
915
# NOTE(review): prebuilt_pkgs is never populated above, so callers always
# receive an empty dict -- confirm whether this return value is vestigial.
916 return prebuilt_pkgs
917
def AddRemainingPackages():
  """Create a package db entry for every merged package that lacks one.

  Every package we are installing needs an entry in the package db.
  This function should only be called after the packages that are not
  being merged have been removed from deps_map.
  """
  for pkg, info in deps_map.items():
    if pkg in self.package_db:
      continue
    if info["action"] != "merge":
      # We should only fill in packages that are being merged. If
      # there's any other packages here, something funny is going on.
      print("Missing entry for %s in package db" % pkg)
      sys.exit(1)
    self.package_db[pkg] = emerge.depgraph._pkg(pkg, "ebuild",
                                                emerge.root_config)
935
936 ReverseTree(deps_tree)
937 BuildFinalPackageSet()
938 AddSecretDeps()
939
940 if self.no_workon_deps:
941 for pkg in self.mandatory_source.copy():
942 for db_pkg in final_db.match_pkgs(pkg):
943 deps_map[str(db_pkg.cpv)]["mandatory_source"] = True
110 else: 944 else:
111 # Not a package name, so pass through to emerge. 945 for pkg in self.mandatory_source.copy():
112 emerge_args.append(arg) 946 for db_pkg in final_db.match_pkgs(pkg):
113 947 MergeChildren(str(db_pkg.cpv), "mandatory_source")
114 emerge_action, emerge_opts, emerge_files = _emerge.main.parse_opts( 948
115 emerge_args) 949 cycles = FindCycles()
116 950 if self.rebuild:
117 return myopts, emerge_action, emerge_opts, emerge_files 951 local_pkgs = LocalPackageDatabase()
118 952 remote_pkgs = RemotePackageDatabase()
119 953 AutoRebuildDeps(local_pkgs, remote_pkgs, cycles)
120 def EmergeCommand(): 954
121 """Helper function to return the base emerge commandline. 955 # We need to remove installed packages so that we can use the dependency
122 956 # ordering of the install process to show us what cycles to crack. Once
123 This is configured for board type, and including pass thru args, 957 # we've done that, we also need to recalculate our list of cycles so that
124 using global variables. TODO(): Unglobalfy. 958 # we don't include the installed packages in our cycles.
125 Returns: 959 RemoveInstalledPackages()
126 string containing emerge command. 960 cycles = FindCycles()
127 """ 961 SanitizeTree(cycles)
128 emerge = "emerge" 962 if deps_map:
129 if "board" in OPTS: 963 if "--usepkg" in emerge.opts:
130 emerge += "-" + OPTS["board"] 964 UsePrebuiltPackages()
131 cmd = [emerge] 965 AddRemainingPackages()
132 for key, val in EMERGE_OPTS.items(): 966 return deps_map
133 if val is True: 967
134 cmd.append(key) 968 def PrintInstallPlan(self, deps_map):
135 else: 969 """Print an emerge-style install plan.
136 cmd.extend([key, str(val)]) 970
137 return " ".join(cmd) 971 The install plan lists what packages we're installing, in order.
138 972 It's useful for understanding what parallel_emerge is doing.
139 973
140 def GetDepsFromPortage(package):
141 """Get dependency tree info by running emerge.
142
143 Run 'emerge -p --debug package', and get a text output of all deps.
144 TODO(): Put dep calculation in a library, as cros_extract_deps
145 also uses this code.
146 Args:
147 package: String containing the packages to build.
148 Returns:
149 Text output of emerge -p --debug, which can be processed elsewhere.
150 """
151 print "Calculating deps for package %s" % package
152 cmdline = (EmergeCommand() + " -p --debug --color=n --with-bdeps=y " +
153 "--selective=n " + package)
154 if OPTS["workon"]:
155 cmdline += " " + " ".join(OPTS["workon"])
156 print "+ %s" % cmdline
157
158 # Store output in a temp file as it is too big for a unix pipe.
159 stderr_buffer = tempfile.TemporaryFile()
160 stdout_buffer = tempfile.TemporaryFile()
161 # Launch the subprocess.
162 start = time.time()
163 depsproc = subprocess.Popen(shlex.split(str(cmdline)), stderr=stderr_buffer,
164 stdout=stdout_buffer, bufsize=64*1024)
165 depsproc.wait()
166 seconds = time.time() - start
167 print "Deps calculated in %dm%.1fs" % (seconds / 60, seconds % 60)
168 stderr_buffer.seek(0)
169 stderr_raw = stderr_buffer.read()
170 info_start = stderr_raw.find("digraph")
171 stdout_buffer.seek(0)
172 stdout_raw = stdout_buffer.read()
173 lines = []
174 if info_start != -1:
175 lines = stderr_raw[info_start:].split("\n")
176 lines.extend(stdout_raw.split("\n"))
177 if VERBOSE or depsproc.returncode != 0:
178 output = stderr_raw + stdout_raw
179 print output
180 if depsproc.returncode != 0:
181 print "Failed to generate deps"
182 sys.exit(1)
183
184 return lines
185
186
187 def DepsToTree(lines):
188 """Regex the output from 'emerge --debug' to generate a nested dict of deps.
189
190 Args:
191 lines: Output from 'emerge -p --debug package'.
192 Returns:
193 dep_tree: Nested dict of dependencies, as specified by emerge.
194 There may be dupes, or circular deps.
195
196 We need to regex lines as follows:
197 hard-host-depends depends on
198 ('ebuild', '/', 'dev-lang/swig-1.3.36', 'merge') depends on
199 ('ebuild', '/', 'dev-lang/perl-5.8.8-r8', 'merge') (buildtime)
200 ('binary', '/.../rootfs/', 'sys-auth/policykit-0.9-r1', 'merge') depends on
201 ('binary', '/.../rootfs/', 'x11-misc/xbitmaps-1.1.0', 'merge') (no children)
202 """
203
204 re_deps = re.compile(r"(?P<indent>\W*)\(\'(?P<pkgtype>\w+)\', "
205 r"\'(?P<destination>[\w/\.-]+)\',"
206 r" \'(?P<pkgdir>[\w\+-]+)/(?P<pkgname>[\w\+-]+)-"
207 r"(?P<version>\d+[\w\.-]*)\', \'(?P<action>\w+)\'\) "
208 r"(?P<deptype>(depends on|\(.*\)))")
209 re_origdeps = re.compile(r"(?P<pkgname>[\w\+/=.<>~*-]+) depends on")
210 re_installed_package = re.compile(
211 r"\[(?P<desc>[^\]]*)\] "
212 r"(?P<pkgdir>[\w\+-]+)/"
213 r"(?P<pkgname>[\w\+-]+)-"
214 r"(?P<version>\d+[\w\.-]*)( \["
215 r"(?P<oldversion>\d+[\w\.-]*)\])?"
216 )
217 re_failed = re.compile(r".*\) depends on.*")
218 deps_tree = {}
219 deps_stack = []
220 deps_info = {}
221 for line in lines:
222 m = re_deps.match(line)
223 m_orig = re_origdeps.match(line)
224 m_installed = re_installed_package.match(line)
225 if m:
226 pkgname = m.group("pkgname")
227 pkgdir = m.group("pkgdir")
228 pkgtype = m.group("pkgtype")
229 indent = m.group("indent")
230 doins = m.group("action")
231 deptype = m.group("deptype")
232 depth = 1
233 if not indent:
234 depth = 0
235 version = m.group("version")
236
237 # If we are indented, we should have
238 # found a "depends on" previously.
239 if len(deps_stack) < depth:
240 print "FAIL: corrupt input at:"
241 print line
242 print "No Parent."
243 sys.exit(1)
244
245 # Go step by step through stack and tree
246 # until we find our parent.
247 updatedep = deps_tree
248 for i in range(0, depth):
249 updatedep = updatedep[deps_stack[i]]["deps"]
250
251 # Pretty print what we've captured.
252 indent = "|" + "".ljust(depth, "_")
253 fullpkg = "%s/%s-%s" % (pkgdir, pkgname, version)
254 if VERBOSE:
255 print ("" + indent + " " + pkgdir + "/" + pkgname + " - " +
256 version + " (" + pkgtype + ", " + doins +
257 ", " + deptype + ")")
258
259 # Add our new package into the tree, if it's not already there.
260 updatedep.setdefault(fullpkg, {})
261 # Add an empty deps for this new package.
262 updatedep[fullpkg].setdefault("deps", {})
263 # Add the action we should take (merge, nomerge).
264 updatedep[fullpkg].setdefault("action", doins)
265 # Add the type of dep.
266 updatedep[fullpkg].setdefault("deptype", deptype)
267 # Add the long name of the package
268 updatedep[fullpkg].setdefault("pkgpath", "%s/%s" % (pkgdir, pkgname))
269 # Add the short name of the package
270 updatedep[fullpkg].setdefault("pkgname", pkgname)
271
272 # Drop any stack entries below our depth.
273 deps_stack = deps_stack[0:depth]
274 # Add ourselves to the end of the stack.
275 deps_stack.append(fullpkg)
276 elif m_orig:
277 # Also capture "pseudo packages", which are the freeform test
278 # we requested to be installed. These are generic package names
279 # like "chromeos" rather than chromeos/chromeos-0.0.1
280 depth = 0
281 # Tag these with "original" in case they overlap with real packages.
282 pkgname = "original-%s" % m_orig.group("pkgname")
283 # Insert this into the deps tree so so we can stick it in "world"
284 updatedep = deps_tree
285 for i in range(0, depth):
286 updatedep = updatedep[deps_stack[i]]["deps"]
287 if VERBOSE:
288 print pkgname
289 # Add our new package into the tree, if it's not already there.
290 updatedep.setdefault(pkgname, {})
291 updatedep[pkgname].setdefault("deps", {})
292 # Add the type of dep.
293 updatedep[pkgname].setdefault("action", "world")
294 updatedep[pkgname].setdefault("deptype", "normal")
295 updatedep[pkgname].setdefault("pkgpath", None)
296 updatedep[pkgname].setdefault("pkgname", None)
297
298 # Drop any obsolete stack entries.
299 deps_stack = deps_stack[0:depth]
300 # Add ourselves to the end of the stack.
301 deps_stack.append(pkgname)
302 elif m_installed:
303 pkgname = m_installed.group("pkgname")
304 pkgdir = m_installed.group("pkgdir")
305 version = m_installed.group("version")
306 oldversion = m_installed.group("oldversion")
307 desc = m_installed.group("desc")
308 uninstall = False
309 if oldversion and (desc.find("U") != -1 or desc.find("D") != -1):
310 uninstall = True
311 replace = desc.find("R") != -1
312 fullpkg = "%s/%s-%s" % (pkgdir, pkgname, version)
313 deps_info[fullpkg] = {"idx": len(deps_info),
314 "pkgdir": pkgdir,
315 "pkgname": pkgname,
316 "oldversion": oldversion,
317 "uninstall": uninstall,
318 "replace": replace}
319 else:
320 # Is this a package that failed to match our huge regex?
321 m = re_failed.match(line)
322 if m:
323 print "\n".join(lines)
324 print "FAIL: Couldn't understand line:"
325 print line
326 sys.exit(1)
327
328 return deps_tree, deps_info
329
330
331 def PrintTree(deps, depth=""):
332 """Print the deps we have seen in the emerge output.
333
334 Args:
335 deps: Dependency tree structure.
336 depth: Allows printing the tree recursively, with indentation.
337 """
338 for entry in deps:
339 action = deps[entry]["action"]
340 print "%s %s (%s)" % (depth, entry, action)
341 PrintTree(deps[entry]["deps"], depth=depth + " ")
342
343
344 def GenDependencyGraph(deps_tree, deps_info, package_names):
345 """Generate a doubly linked dependency graph.
346
347 Args:
348 deps_tree: Dependency tree structure.
349 deps_info: More details on the dependencies.
350 package_names: Names of packages to add to the world file.
351 Returns:
352 Deps graph in the form of a dict of packages, with each package
353 specifying a "needs" list and "provides" list.
354 """
355 deps_map = {}
356 pkgpaths = {}
357
358 def ReverseTree(packages):
359 """Convert tree to digraph.
360
361 Take the tree of package -> requirements and reverse it to a digraph of
362 buildable packages -> packages they unblock.
363 Args: 974 Args:
364 packages: Tree(s) of dependencies. 975 deps_map: The dependency graph.
365 Returns:
366 Unsanitized digraph.
367 """ 976 """
368 for pkg in packages: 977
369 action = packages[pkg]["action"] 978 def InstallPlanAtNode(target, deps_map):
370 pkgpath = packages[pkg]["pkgpath"] 979 nodes = []
371 pkgname = packages[pkg]["pkgname"] 980 nodes.append(target)
372 pkgpaths[pkgpath] = pkg 981 for dep in deps_map[target]["provides"]:
373 pkgpaths[pkgname] = pkg 982 del deps_map[dep]["needs"][target]
374 this_pkg = deps_map.setdefault( 983 if not deps_map[dep]["needs"]:
375 pkg, {"needs": {}, "provides": set(), "action": "nomerge", 984 nodes.extend(InstallPlanAtNode(dep, deps_map))
376 "workon": False, "cmdline": False}) 985 return nodes
377 if action != "nomerge": 986
378 this_pkg["action"] = action 987 deps_map = copy.deepcopy(deps_map)
379 this_pkg["deps_info"] = deps_info.get(pkg) 988 install_plan = []
380 ReverseTree(packages[pkg]["deps"]) 989 plan = set()
381 for dep, dep_item in packages[pkg]["deps"].items(): 990 for target, info in deps_map.iteritems():
382 dep_pkg = deps_map[dep] 991 if not info["needs"] and target not in plan:
383 dep_type = dep_item["deptype"] 992 for item in InstallPlanAtNode(target, deps_map):
384 if dep_type != "(runtime_post)": 993 plan.add(item)
385 dep_pkg["provides"].add(pkg) 994 install_plan.append(self.package_db[item])
386 this_pkg["needs"][dep] = dep_type 995
387 996 self.emerge.depgraph.display(install_plan)
388 def RemoveInstalledPackages():
389 """Remove installed packages, propagating dependencies."""
390
391 if "--selective" in EMERGE_OPTS:
392 selective = EMERGE_OPTS["--selective"] != "n"
393 else:
394 selective = "--noreplace" in EMERGE_OPTS or "--update" in EMERGE_OPTS
395 rm_pkgs = set(deps_map.keys()) - set(deps_info.keys())
396 for pkg, info in deps_info.items():
397 if selective and not deps_map[pkg]["workon"] and info["replace"]:
398 rm_pkgs.add(pkg)
399 for pkg in rm_pkgs:
400 this_pkg = deps_map[pkg]
401 if this_pkg["cmdline"] and "--oneshot" not in EMERGE_OPTS:
402 # If "cmdline" is set, this is a world update that was passed on the
403 # command-line. Keep these unless we're in --oneshot mode.
404 continue
405 needs = this_pkg["needs"]
406 provides = this_pkg["provides"]
407 for dep in needs:
408 dep_provides = deps_map[dep]["provides"]
409 dep_provides.update(provides)
410 dep_provides.discard(pkg)
411 dep_provides.discard(dep)
412 for target in provides:
413 target_needs = deps_map[target]["needs"]
414 target_needs.update(needs)
415 if pkg in target_needs:
416 del target_needs[pkg]
417 if target in target_needs:
418 del target_needs[target]
419 del deps_map[pkg]
420
421 def SanitizeDep(basedep, currdep, visited, cycle):
422 """Search for circular deps between basedep and currdep, then recurse.
423
424 Args:
425 basedep: Original dependency, top of stack.
426 currdep: Bottom of our current recursion, bottom of stack.
427 visited: Nodes visited so far.
428 cycle: Array where cycle of circular dependencies should be stored.
429 TODO(): Break RDEPEND preferentially.
430 Returns:
431 True iff circular dependencies are found.
432 """
433 if currdep not in visited:
434 visited.add(currdep)
435 for dep in deps_map[currdep]["needs"]:
436 if dep == basedep or SanitizeDep(basedep, dep, visited, cycle):
437 cycle.insert(0, dep)
438 return True
439 return False
440
441 def SanitizeTree():
442 """Remove circular dependencies."""
443 start = time.time()
444 for basedep in deps_map:
445 this_pkg = deps_map[basedep]
446 if this_pkg["action"] == "world":
447 # world file updates can't be involved in cycles,
448 # and they don't have deps_info, so skip them.
449 continue
450 for dep in this_pkg["needs"].copy():
451 cycle = []
452 if (deps_info[basedep]["idx"] <= deps_info[dep]["idx"] and
453 SanitizeDep(basedep, dep, set(), cycle)):
454 cycle[:0] = [basedep, dep]
455 print "Breaking cycle:"
456 for i in range(len(cycle) - 1):
457 deptype = deps_map[cycle[i]]["needs"][cycle[i+1]]
458 print " %s -> %s %s" % (cycle[i], cycle[i+1], deptype)
459 del this_pkg["needs"][dep]
460 deps_map[dep]["provides"].remove(basedep)
461 seconds = time.time() - start
462 print "Tree sanitized in %d:%04.1fs" % (seconds / 60, seconds % 60)
463
464 def AddSecretDeps():
465 """Find these tagged packages and add extra dependencies.
466
467 For debugging dependency problems.
468 """
469 for bad in secret_deps:
470 needed = secret_deps[bad]
471 bad_pkg = None
472 needed_pkg = None
473 for dep in deps_map:
474 if dep.find(bad) != -1:
475 bad_pkg = dep
476 if dep.find(needed) != -1:
477 needed_pkg = dep
478 if bad_pkg and needed_pkg:
479 deps_map[needed_pkg]["provides"].add(bad_pkg)
480 deps_map[bad_pkg]["needs"].add(needed_pkg)
481
482 def WorkOnChildren(pkg):
483 """Mark this package and all packages it provides as workon packages."""
484
485 this_pkg = deps_map[pkg]
486 if this_pkg["workon"]:
487 return False
488
489 this_pkg["workon"] = True
490 updated = False
491 for w in this_pkg["provides"]:
492 if WorkOnChildren(w):
493 updated = True
494
495 if this_pkg["action"] == "nomerge":
496 pkgpath = deps_tree[pkg]["pkgpath"]
497 if pkgpath is not None:
498 OPTS["workon"].add(pkgpath)
499 updated = True
500
501 return updated
502
503 ReverseTree(deps_tree)
504 AddSecretDeps()
505
506 if "no-workon-deps" in OPTS:
507 for pkgpath in OPTS["workon"].copy():
508 pkg = pkgpaths[pkgpath]
509 deps_map[pkg]["workon"] = True
510 else:
511 mergelist_updated = False
512 for pkgpath in OPTS["workon"].copy():
513 pkg = pkgpaths[pkgpath]
514 if WorkOnChildren(pkg):
515 mergelist_updated = True
516 if mergelist_updated:
517 print "List of packages to merge updated. Recalculate dependencies..."
518 return None
519
520 for pkgpath in package_names:
521 dep_pkg = deps_map.get("original-%s" % pkgpath)
522 if dep_pkg and len(dep_pkg["needs"]) == 1:
523 dep_pkg["cmdline"] = True
524
525 RemoveInstalledPackages()
526 SanitizeTree()
527 return deps_map
528 997
529 998
530 def PrintDepsMap(deps_map): 999 def PrintDepsMap(deps_map):
531 """Print dependency graph, for each package list it's prerequisites.""" 1000 """Print dependency graph, for each package list it's prerequisites."""
532 for i in deps_map: 1001 for i in deps_map:
533 print "%s: (%s) needs" % (i, deps_map[i]["action"]) 1002 print "%s: (%s) needs" % (i, deps_map[i]["action"])
534 for j in deps_map[i]["needs"]: 1003 needs = deps_map[i]["needs"]
1004 for j in needs:
535 print " %s" % (j) 1005 print " %s" % (j)
1006 if not needs:
1007 print " no dependencies"
1008
1009
1010 def EmergeWorker(task_queue, done_queue, emerge, package_db):
1011 """This worker emerges any packages given to it on the task_queue.
1012
1013 Args:
1014 task_queue: The queue of tasks for this worker to do.
1015 done_queue: The queue of results from the worker.
1016 emerge: An EmergeData() object.
1017 package_db: A dict, mapping package ids to portage Package objects.
1018
1019 It expects package identifiers to be passed to it via task_queue. When
1020 the package is merged, it pushes (target, retval, outputstr) into the
1021 done_queue.

This function never returns: it loops forever pulling targets off
task_queue, and is reaped when the owning multiprocessing pool shuts down.
1022 """
1023
# Dependency resolution already happened in the parent; each task merges a
# single package, so the child scheduler is forced into --nodeps mode.
1024 settings, trees, mtimedb = emerge.settings, emerge.trees, emerge.mtimedb
1025 opts, spinner = emerge.opts, emerge.spinner
1026 opts["--nodeps"] = True
1027 while True:
# Blocking call: the worker sleeps here until the parent enqueues a target.
1028 target = task_queue.get()
1029 print "Emerging", target
Nick Sanders 2010/07/20 05:43:40 Can you mention that get is blocking
1030 db_pkg = package_db[target]
1031 db_pkg.root_config = emerge.root_config
1032 install_list = [db_pkg]
# All merge output is captured to a temp file and reported only on failure.
1033 output = tempfile.TemporaryFile()
1034 outputstr = ""
1035 if "--pretend" in opts:
1036 retval = 0
1037 else:
1038 save_stdout = sys.stdout
1039 save_stderr = sys.stderr
1040 try:
1041 sys.stdout = output
1042 sys.stderr = output
1043 scheduler = Scheduler(settings, trees, mtimedb, opts, spinner,
1044 install_list, [], emerge.scheduler_graph)
1045 retval = scheduler.merge()
1046 finally:
# Always restore the real stdio, even if the merge raised.
1047 sys.stdout = save_stdout
1048 sys.stderr = save_stderr
1049 if retval is None:
1050 retval = 0
1051 if retval != 0:
1052 output.seek(0)
1053 outputstr = output.read()
1054
1055 done_queue.put((target, retval, outputstr))
536 1056
537 1057
538 class EmergeQueue(object): 1058 class EmergeQueue(object):
539 """Class to schedule emerge jobs according to a dependency graph.""" 1059 """Class to schedule emerge jobs according to a dependency graph."""
540 1060
541 def __init__(self, deps_map): 1061 def __init__(self, deps_map, emerge, package_db):
542 # Store the dependency graph. 1062 # Store the dependency graph.
543 self._deps_map = deps_map 1063 self._deps_map = deps_map
544 # Initialize the runnable queue to empty. 1064 # Initialize the running queue to empty
545 self._jobs = [] 1065 self._jobs = set()
546 # List of total package installs represented in deps_map. 1066 # List of total package installs represented in deps_map.
547 install_jobs = [x for x in deps_map if deps_map[x]["action"] == "merge"] 1067 install_jobs = [x for x in deps_map if deps_map[x]["action"] == "merge"]
548 self._total_jobs = len(install_jobs) 1068 self._total_jobs = len(install_jobs)
549 1069
550 # Initialize the ready queue, these are jobs with no unmet dependencies. 1070 if "--pretend" in emerge.opts:
551 self._emerge_queue = [x for x in deps_map if not deps_map[x]["needs"]] 1071 print "Skipping merge because of --pretend mode."
1072 sys.exit(0)
1073
1074 # Setup scheduler graph object. This is used by the child processes
1075 # to help schedule jobs.
1076 emerge.scheduler_graph = emerge.depgraph.schedulerGraph()
1077
1078 procs = min(self._total_jobs,
1079 emerge.opts.get("--jobs", multiprocessing.cpu_count()))
1080 self._emerge_queue = multiprocessing.Queue()
1081 self._done_queue = multiprocessing.Queue()
1082 args = (self._emerge_queue, self._done_queue, emerge, package_db)
1083 self._pool = multiprocessing.Pool(procs, EmergeWorker, args)
1084
552 # Initialize the failed queue to empty. 1085 # Initialize the failed queue to empty.
553 self._retry_queue = [] 1086 self._retry_queue = []
554 self._failed = {} 1087 self._failed = {}
555 1088
1089 # Print an update before we launch the merges.
1090 self._Status()
1091
1092 for target, info in deps_map.items():
1093 if not info["needs"]:
1094 self._Schedule(target)
1095
1096 def _Schedule(self, target):
"""Hand target to a worker, or finish it in place if nothing to merge.

Args:
  target: Package identifier whose dependencies are all satisfied.
"""
1097 # We maintain a tree of all deps, if this doesn't need
1098 # to be installed just free up it's children and continue.
1099 # It is possible to reinstall deps of deps, without reinstalling
1100 # first level deps, like so:
1101 # chromeos (merge) -> eselect (nomerge) -> python (merge)
1102 if self._deps_map[target]["action"] == "nomerge":
1103 self._Finish(target)
1104 else:
1105 # Kick off the build if it's marked to be built.
1106 self._jobs.add(target)
1107 self._emerge_queue.put(target)
1108
556 def _LoadAvg(self): 1109 def _LoadAvg(self):
557 loads = open("/proc/loadavg", "r").readline().split()[:3] 1110 loads = open("/proc/loadavg", "r").readline().split()[:3]
558 return " ".join(loads) 1111 return " ".join(loads)
559 1112
560 def _Status(self): 1113 def _Status(self):
561 """Print status.""" 1114 """Print status."""
562 seconds = time.time() - GLOBAL_START 1115 seconds = time.time() - GLOBAL_START
563 line = ("Pending %s, Ready %s, Running %s, Retrying %s, Total %s " 1116 line = ("Pending %s, Ready %s, Running %s, Retrying %s, Total %s "
564 "[Time %dm%ds Load %s]") 1117 "[Time %dm%.1fs Load %s]")
565 print line % (len(self._deps_map), len(self._emerge_queue), 1118 qsize = self._emerge_queue.qsize()
566 len(self._jobs), len(self._retry_queue), self._total_jobs, 1119 print line % (len(self._deps_map), qsize, len(self._jobs) - qsize,
1120 len(self._retry_queue), self._total_jobs,
567 seconds / 60, seconds % 60, self._LoadAvg()) 1121 seconds / 60, seconds % 60, self._LoadAvg())
568 1122
569 def _LaunchOneEmerge(self, target, action):
570 """Run emerge --nodeps to do a single package install.
571
572 If this is a pseudopackage, that means we're done, and can select in in the
573 world file.
574 Args:
575 target: The full package name of the package to install.
576 eg. "sys-apps/portage-2.17"
577 Returns:
578 Triplet containing (target name, subprocess object, output buffer object).
579 """
580 if target.startswith("original-"):
581 # "original-" signifies one of the packages we originally requested.
582 # Since we have explicitly installed the versioned package as a dep of
583 # this, we only need to tag in "world" that we are done with this
584 # install request.
585 # --nodeps: Ignore dependencies -- we handle them internally.
586 # --noreplace: Don't replace or upgrade any packages. (In this case, the
587 # package is already installed, so we are just updating the
588 # world file.)
589 # --selective: Make sure that --noreplace sticks even if --selective=n is
590 # specified by the user on the command-line.
591 # NOTE: If the user specifies --oneshot on the command-line, this command
592 # will do nothing. That is desired, since the user requested not to
593 # update the world file.
594 newtarget = target.replace("original-", "")
595 cmdline = (EmergeCommand() + " --nodeps --selective --noreplace " +
596 newtarget)
597 elif action == "uninstall":
598 cmdline = EmergeCommand() + " --nodeps --unmerge =" + target
599 else:
600 # This package is a dependency of something we specifically
601 # requested. Therefore we should install it but not allow it
602 # in the "world" file, which represents explicit installs.
603 # --oneshot" here will prevent it from being tagged in world.
604 cmdline = EmergeCommand() + " --nodeps --oneshot "
605 this_pkg = self._deps_map[target]
606 if this_pkg["workon"]:
607 # --usepkg=n --usepkgonly=n --getbinpkg=n
608 # --getbinpkgonly=n: Build from source
609 # --selective=n: Re-emerge even if package is already installed.
610 cmdline += ("--usepkg=n --usepkgonly=n --getbinpkg=n "
611 "--getbinpkgonly=n --selective=n ")
612 cmdline += "=" + target
613 deps_info = this_pkg["deps_info"]
614 if deps_info["uninstall"]:
615 package = "%(pkgdir)s/%(pkgname)s-%(oldversion)s" % deps_info
616 cmdline += " && %s -C =%s" % (EmergeCommand(), package)
617
618 print "+ %s" % cmdline
619
620 # Store output in a temp file as it is too big for a unix pipe.
621 stdout_buffer = tempfile.TemporaryFile()
622 # Modify the environment to disable locking.
623 portage_env = os.environ.copy()
624 portage_env["PORTAGE_LOCKS"] = "false"
625 portage_env["UNMERGE_DELAY"] = "0"
626 # Autoclean rummages around in the portage database and uninstalls
627 # old packages. It's not parallel safe, so we skip it. Instead, we
628 # handle the cleaning ourselves by uninstalling old versions of any
629 # new packages we install.
630 if not AUTOCLEAN:
631 portage_env["AUTOCLEAN"] = "no"
632 # Launch the subprocess.
633 emerge_proc = subprocess.Popen(
634 cmdline, shell=True, stdout=stdout_buffer,
635 stderr=subprocess.STDOUT, bufsize=64*1024, env=portage_env)
636
637 return (target, emerge_proc, stdout_buffer)
638
639 def _Finish(self, target): 1123 def _Finish(self, target):
640 """Mark a target as completed and unblock dependecies.""" 1124 """Mark a target as completed and unblock dependecies."""
641 for dep in self._deps_map[target]["provides"]: 1125 for dep in self._deps_map[target]["provides"]:
642 del self._deps_map[dep]["needs"][target] 1126 del self._deps_map[dep]["needs"][target]
643 if not self._deps_map[dep]["needs"]: 1127 if not self._deps_map[dep]["needs"]:
644 if VERBOSE: 1128 self._Schedule(dep)
645 print "Unblocking %s" % dep
646 self._emerge_queue.append(dep)
647 self._deps_map.pop(target) 1129 self._deps_map.pop(target)
648 1130
649 def _Retry(self): 1131 def _Retry(self):
650 if self._retry_queue: 1132 if self._retry_queue:
651 target = self._retry_queue.pop(0) 1133 target = self._retry_queue.pop(0)
652 self._emerge_queue.append(target) 1134 self._Schedule(target)
653 print "Retrying emerge of %s." % target 1135 print "Retrying emerge of %s." % target
654 1136
655 def Run(self): 1137 def Run(self):
656 """Run through the scheduled ebuilds. 1138 """Run through the scheduled ebuilds.
657 1139
658 Keep running so long as we have uninstalled packages in the 1140 Keep running so long as we have uninstalled packages in the
659 dependency graph to merge. 1141 dependency graph to merge.
660 """ 1142 """
661 secs = 0
662 max_jobs = EMERGE_OPTS.get("--jobs", 256)
663 while self._deps_map: 1143 while self._deps_map:
664 # If we have packages that are ready, kick them off.
665 if self._emerge_queue and len(self._jobs) < max_jobs:
666 target = self._emerge_queue.pop(0)
667 action = self._deps_map[target]["action"]
668 # We maintain a tree of all deps, if this doesn't need
669 # to be installed just free up it's children and continue.
670 # It is possible to reinstall deps of deps, without reinstalling
671 # first level deps, like so:
672 # chromeos (merge) -> eselect (nomerge) -> python (merge)
673 if action == "nomerge":
674 self._Finish(target)
675 else:
676 # Kick off the build if it's marked to be built.
677 print "Emerging %s (%s)" % (target, action)
678 job = self._LaunchOneEmerge(target, action)
679 # Append it to the active jobs list.
680 self._jobs.append(job)
681 continue
682 # Wait a bit to see if maybe some jobs finish. You can't
683 # wait on a set of jobs in python, so we'll just poll.
684 time.sleep(1)
685 secs += 1
686 if secs % 30 == 0:
687 # Print an update.
688 self._Status()
689
690 # Check here that we are actually waiting for something. 1144 # Check here that we are actually waiting for something.
691 if (not self._emerge_queue and 1145 if (self._emerge_queue.empty() and
1146 self._done_queue.empty() and
692 not self._jobs and 1147 not self._jobs and
693 self._deps_map): 1148 self._deps_map):
694 # If we have failed on a package, retry it now. 1149 # If we have failed on a package, retry it now.
695 if self._retry_queue: 1150 if self._retry_queue:
696 self._Retry() 1151 self._Retry()
697 # If we have failed a package twice, just give up. 1152 # If we have failed a package twice, just give up.
698 elif self._failed: 1153 elif self._failed:
699 for failure, output in self._failed.items(): 1154 for failure, output in self._failed.items():
700 print "Package failed: %s" % failure 1155 print "Package failed: %s" % failure
701 print output 1156 print output
702 PrintDepsMap(self._deps_map) 1157 PrintDepsMap(self._deps_map)
703 print "Packages failed: %s" % " ,".join(self._failed.keys()) 1158 print "Packages failed: %s" % " ,".join(self._failed.keys())
704 sys.exit(1) 1159 sys.exit(1)
705 # If we have dependency cycles. 1160 # If we have dependency cycles.
706 else: 1161 else:
707 print "Deadlock! Circular dependencies!" 1162 print "Deadlock! Circular dependencies!"
708 PrintDepsMap(self._deps_map) 1163 PrintDepsMap(self._deps_map)
709 sys.exit(1) 1164 sys.exit(1)
710 1165
711 # Check every running job to see if we've finished any jobs. 1166 try:
712 for target, job, stdout in self._jobs: 1167 target, retcode, output = self._done_queue.get(timeout=5)
713 # Is it done? 1168 except Queue.Empty:
714 if job.poll() is not None: 1169 # Print an update.
715 # Clean up the subprocess. 1170 self._Status()
716 job.wait() 1171 continue
717 # Get the output if we want to print it.
718 stdout.seek(0)
719 output = stdout.read()
720 1172
721 # Remove from active jobs list, we are done with this process. 1173 self._jobs.discard(target)
722 self._jobs.remove((target, job, stdout))
723 1174
724 # Print if necessary. 1175 # Print if necessary.
725 if VERBOSE or job.returncode != 0: 1176 if retcode != 0:
726 print output 1177 print output
727 if job.returncode != 0: 1178 if retcode != 0:
728 # Handle job failure. 1179 # Handle job failure.
729 if target in self._failed: 1180 if target in self._failed:
730 # If this job has failed previously, give up. 1181 # If this job has failed previously, give up.
731 print "Failed %s. Your build has failed." % target 1182 print "Failed %s. Your build has failed." % target
732 else: 1183 else:
733 # Queue up this build to try again after a long while. 1184 # Queue up this build to try again after a long while.
734 self._retry_queue.append(target) 1185 self._retry_queue.append(target)
735 self._failed[target] = output 1186 self._failed[target] = 1
736 print "Failed %s, retrying later." % target 1187 print "Failed %s, retrying later." % target
737 else: 1188 else:
738 if target in self._failed and self._retry_queue: 1189 if target in self._failed and self._retry_queue:
739 # If we have successfully retried a failed package, and there 1190 # If we have successfully retried a failed package, and there
740 # are more failed packages, try the next one. We will only have 1191 # are more failed packages, try the next one. We will only have
741 # one retrying package actively running at a time. 1192 # one retrying package actively running at a time.
742 self._Retry() 1193 self._Retry()
743 1194
744 print "Completed %s" % target 1195 print "Completed %s" % target
745 # Mark as completed and unblock waiting ebuilds. 1196 # Mark as completed and unblock waiting ebuilds.
746 self._Finish(target) 1197 self._Finish(target)
747 1198
748 # Print an update. 1199 # Print an update.
749 self._Status() 1200 self._Status()
750 1201
751 1202
752 # Main control code. 1203 def main():
753 OPTS, EMERGE_ACTION, EMERGE_OPTS, EMERGE_FILES = ParseArgs(sys.argv)
754 1204
755 if EMERGE_ACTION is not None: 1205 deps = DepGraphGenerator()
756 # Pass action arguments straight through to emerge 1206 deps.Initialize(sys.argv[1:])
757 EMERGE_OPTS["--%s" % EMERGE_ACTION] = True 1207 emerge = deps.emerge
758 sys.exit(os.system(EmergeCommand() + " " + " ".join(EMERGE_FILES)))
759 elif not EMERGE_FILES:
760 Usage()
761 sys.exit(1)
762 1208
763 print "Starting fast-emerge." 1209 if emerge.action is not None:
764 print " Building package %s on %s" % (" ".join(EMERGE_FILES), 1210 sys.argv = deps.ParseParallelEmergeArgs(sys.argv)
765 OPTS.get("board", "root")) 1211 sys.exit(emerge_main())
1212 elif not emerge.cmdline_packages:
1213 Usage()
1214 sys.exit(1)
766 1215
767 # If the user supplied the --workon option, we may have to run emerge twice 1216 # Unless we're in pretend mode, there's not much point running without
768 # to generate a dependency ordering for packages that depend on the workon 1217 # root access. We need to be able to install packages.
769 # packages. 1218 #
770 for it in range(2): 1219 # NOTE: Even if you're running --pretend, it's a good idea to run
771 print "Running emerge to generate deps" 1220 # parallel_emerge with root access so that portage can write to the
772 deps_output = GetDepsFromPortage(" ".join(EMERGE_FILES)) 1221 # dependency cache. This is important for performance.
1222 if "--pretend" not in emerge.opts and portage.secpass < 2:
1223 print "parallel_emerge: superuser access is required."
1224 sys.exit(1)
773 1225
774 print "Processing emerge output" 1226 if "--quiet" not in emerge.opts:
775 dependency_tree, dependency_info = DepsToTree(deps_output) 1227 cmdline_packages = " ".join(emerge.cmdline_packages)
1228 print "Starting fast-emerge."
1229 print " Building package %s on %s" % (cmdline_packages,
1230 deps.board or "root")
776 1231
777 if VERBOSE: 1232 deps_tree, deps_info = deps.GenDependencyTree()
778 print "Print tree"
779 PrintTree(dependency_tree)
780 1233
781 print "Generate dependency graph." 1234 # You want me to be verbose? I'll give you two trees! Twice as much value.
782 dependency_graph = GenDependencyGraph(dependency_tree, dependency_info, 1235 if "--tree" in emerge.opts and "--verbose" in emerge.opts:
783 EMERGE_FILES) 1236 deps.PrintTree(deps_tree)
784 1237
785 if dependency_graph is not None: 1238 deps_graph = deps.GenDependencyGraph(deps_tree, deps_info)
786 break
787 else:
788 print "Can't crack cycle"
789 sys.exit(1)
790 1239
791 if VERBOSE: 1240 # OK, time to print out our progress so far.
792 PrintDepsMap(dependency_graph) 1241 deps.PrintInstallPlan(deps_graph)
1242 if "--tree" in emerge.opts:
1243 PrintDepsMap(deps_graph)
793 1244
794 # Run the queued emerges. 1245 # Run the queued emerges.
795 scheduler = EmergeQueue(dependency_graph) 1246 scheduler = EmergeQueue(deps_graph, emerge, deps.package_db)
796 scheduler.Run() 1247 scheduler.Run()
797 1248
798 print "Done" 1249 # Update world.
1250 if ("--oneshot" not in emerge.opts and
1251 "--pretend" not in emerge.opts):
1252 world_set = emerge.root_config.sets["selected"]
1253 new_world_pkgs = []
1254 root = emerge.settings["ROOT"]
1255 final_db = emerge.depgraph._dynamic_config.mydbapi[root]
1256 for pkg in emerge.cmdline_packages:
1257 for db_pkg in final_db.match_pkgs(pkg):
1258 print "Adding %s to world" % db_pkg.cp
1259 new_world_pkgs.append(db_pkg.cp)
1260 if new_world_pkgs:
1261 world_set.update(new_world_pkgs)
799 1262
1263 print "Done"
1264
1265 if __name__ == "__main__":
1266 main()
OLDNEW
« no previous file with comments | « no previous file | no next file » | no next file with comments »

Powered by Google App Engine
This is Rietveld 408576698