OLD | NEW |
1 #!/usr/bin/python2.6 | 1 #!/usr/bin/python2.6 |
2 # Copyright (c) 2010 The Chromium OS Authors. All rights reserved. | 2 # Copyright (c) 2010 The Chromium OS Authors. All rights reserved. |
3 # Use of this source code is governed by a BSD-style license that can be | 3 # Use of this source code is governed by a BSD-style license that can be |
4 # found in the LICENSE file. | 4 # found in the LICENSE file. |
5 | 5 |
6 """Program to run emerge in parallel, for significant speedup. | 6 """Program to run emerge in parallel, for significant speedup. |
7 | 7 |
8 Usage: | 8 Usage: |
9 ./parallel_emerge --board=BOARD [emerge args] package | 9 ./parallel_emerge [--board=BOARD] [--workon=PKGS] [--no-workon-deps] |
 | 10 [emerge args] package |
10 | 11 |
11 Basic operation: | 12 Basic operation: |
12 Runs 'emerge -p --debug' to display dependencies, and stores a | 13 Runs 'emerge -p --debug' to display dependencies, and stores a |
13 dependency graph. All non-blocked packages are launched in parallel, | 14 dependency graph. All non-blocked packages are launched in parallel, |
14 as 'emerge --nodeps package' with any blocked packages being emerged | 15 as 'emerge --nodeps package' with any blocked packages being emerged |
15 immediately upon deps being met. | 16 immediately upon deps being met. |
16 | 17 |
17 For this to work effectively, /usr/lib/portage/pym/portage/locks.py | 18 For this to work effectively, /usr/lib/portage/pym/portage/locks.py |
18 must be stubbed out, preventing portage from slowing itself with | 19 must be stubbed out, preventing portage from slowing itself with |
19 unnecessary locking, as this script ensures that emerge is run in such | 20 unnecessary locking, as this script ensures that emerge is run in such |
(...skipping 17 matching lines...) Expand all Loading... |
37 of the same package for a runtime dep). | 38 of the same package for a runtime dep). |
38 """ | 39 """ |
39 | 40 |
40 import os | 41 import os |
41 import re | 42 import re |
42 import shlex | 43 import shlex |
43 import subprocess | 44 import subprocess |
44 import sys | 45 import sys |
45 import tempfile | 46 import tempfile |
46 import time | 47 import time |
| 48 import _emerge.main |
47 | 49 |
48 | 50 |
49 def Usage(): | 51 def Usage(): |
| 52 """Print usage.""" |
50 print "Usage:" | 53 print "Usage:" |
51 print " ./parallel_emerge --board=BOARD --jobs=JOBS [emerge args] package" | 54 print " ./parallel_emerge [--board=BOARD] [--workon=PKGS] [--no-workon-deps]" |
| 55 print " [emerge args] package" |
| 56 print |
| 57 print "Packages specified as workon packages are always built from source." |
| 58 print "Unless --no-workon-deps is specified, packages that depend on these" |
| 59 print "packages are also built from source." |
| 60 print |
| 61 print "The --workon argument is mainly useful when you want to build and" |
 | 62 print "install packages that you are working on unconditionally, but do not" |
 | 63 print "want" |
| 63 print "to have to rev the package to indicate you want to build it from" |
| 64 print "source. The build_packages script will automatically supply the" |
| 65 print "workon argument to emerge, ensuring that packages selected using" |
| 66 print "cros-workon are rebuilt." |
52 sys.exit(1) | 67 sys.exit(1) |
53 | 68 |
54 | 69 |
55 # These are dependencies that are not specified in the package, | 70 # These are dependencies that are not specified in the package, |
56 # but will prevent the package from installing. | 71 # but will prevent the package from installing. |
57 secret_deps = {} | 72 secret_deps = {} |
58 | 73 |
59 # Globals: package we are building, board we are targeting, | |
60 # emerge args we are passing through. | |
61 PACKAGE = None | |
62 EMERGE_ARGS = "" | |
63 BOARD = None | |
64 | |
65 # Runtime flags. TODO(): Maybe make these command-line options or | 74 # Runtime flags. TODO(): Maybe make these command-line options or |
66 # environment variables. | 75 # environment variables. |
67 VERBOSE = False | 76 VERBOSE = False |
68 AUTOCLEAN = False | 77 AUTOCLEAN = False |
69 | 78 |
70 # Global start time | 79 # Global start time |
71 GLOBAL_START = time.time() | 80 GLOBAL_START = time.time() |
72 | 81 |
73 | 82 |
74 def ParseArgs(argv): | 83 def ParseArgs(argv): |
75 """Set global vars based on command line. | 84 """Set global vars based on command line. |
76 | 85 |
77 We need to be compatible with emerge arg format. | 86 We need to be compatible with emerge arg format. |
78 We scrape --board=XXX and --jobs=XXX, and distinguish between args | 87 We scrape arguments that are specific to parallel_emerge, and pass through |
79 and package names. | 88 the rest directly to emerge. |
80 TODO(): Robustify argument processing, as it's possible to | |
81 pass in many two argument parameters that are difficult | |
82 to programmatically identify, although we don't currently | |
83 use any besides --with-bdeps <y|n>. | |
84 Args: | 89 Args: |
85 argv: arguments list | 90 argv: arguments list |
86 Returns: | 91 Returns: |
87 triplet of (package list, emerge arguments, board string) | 92 tuple of (parallel_emerge opts, emerge action, emerge opts, emerge files) |
88 """ | 93 """ |
89 if VERBOSE: | 94 if VERBOSE: |
90 print argv | 95 print argv |
91 board_arg = None | 96 workon_set = set() |
92 jobs_arg = 0 | 97 myopts = {} |
93 package_args = [] | 98 myopts["workon"] = workon_set |
94 emerge_passthru_args = "" | 99 emerge_args = [] |
95 for arg in argv[1:]: | 100 for arg in argv[1:]: |
96 # Specifically match "--board=" and "--jobs=". | 101 # Specifically match arguments that are specific to parallel_emerge, and |
| 102 # pass through the rest. |
97 if arg.startswith("--board="): | 103 if arg.startswith("--board="): |
98 board_arg = arg.replace("--board=", "") | 104 myopts["board"] = arg.replace("--board=", "") |
99 elif arg.startswith("--jobs="): | 105 elif arg.startswith("--workon="): |
100 try: | 106 workon_str = arg.replace("--workon=", "") |
101 jobs_arg = int(arg.replace("--jobs=", "")) | 107 workon_set.update(shlex.split(" ".join(shlex.split(workon_str)))) |
102 except ValueError: | 108 elif arg == "--no-workon-deps": |
103 print "Unrecognized argument:", arg | 109 myopts["no-workon-deps"] = True |
104 Usage() | 110 else: |
105 sys.exit(1) | |
106 elif arg.startswith("-") or arg == "y" or arg == "n": | |
107 # Not a package name, so pass through to emerge. | 111 # Not a package name, so pass through to emerge. |
108 emerge_passthru_args = emerge_passthru_args + " " + arg | 112 emerge_args.append(arg) |
109 else: | |
110 package_args.append(arg) | |
111 | 113 |
112 if not package_args and not emerge_passthru_args: | 114 emerge_action, emerge_opts, emerge_files = _emerge.main.parse_opts( |
113 Usage() | 115 emerge_args) |
114 sys.exit(1) | |
115 | 116 |
116 # Default to lots of jobs | 117 return myopts, emerge_action, emerge_opts, emerge_files |
117 if jobs_arg <= 0: | |
118 jobs_arg = 256 | |
119 | |
120 # Set globals. | |
121 return " ".join(package_args), emerge_passthru_args, board_arg, jobs_arg | |
122 | 118 |
123 | 119 |
124 def EmergeCommand(): | 120 def EmergeCommand(): |
125 """Helper function to return the base emerge commandline. | 121 """Helper function to return the base emerge commandline. |
126 | 122 |
127 This is configured for board type, and including pass thru args, | 123 This is configured for board type, and including pass thru args, |
128 using global variables. TODO(): Unglobalfy. | 124 using global variables. TODO(): Unglobalfy. |
129 Returns: | 125 Returns: |
130 string containing emerge command. | 126 string containing emerge command. |
131 """ | 127 """ |
132 emerge = "emerge" | 128 emerge = "emerge" |
133 if BOARD: | 129 if "board" in OPTS: |
134 emerge += "-" + BOARD | 130 emerge += "-" + OPTS["board"] |
135 return emerge + " " + EMERGE_ARGS | 131 cmd = [emerge] |
| 132 for key, val in EMERGE_OPTS.items(): |
| 133 if val is True: |
| 134 cmd.append(key) |
| 135 else: |
| 136 cmd.extend([key, str(val)]) |
| 137 return " ".join(cmd) |
136 | 138 |
137 | 139 |
138 def GetDepsFromPortage(package): | 140 def GetDepsFromPortage(package): |
139 """Get dependency tree info by running emerge. | 141 """Get dependency tree info by running emerge. |
140 | 142 |
141 Run 'emerge -p --debug package', and get a text output of all deps. | 143 Run 'emerge -p --debug package', and get a text output of all deps. |
142 TODO(): Put dep calculation in a library, as cros_extract_deps | 144 TODO(): Put dep calculation in a library, as cros_extract_deps |
143 also uses this code. | 145 also uses this code. |
144 Args: | 146 Args: |
145 package: String containing the packages to build. | 147 package: String containing the packages to build. |
146 Returns: | 148 Returns: |
147 Text output of emerge -p --debug, which can be processed elsewhere. | 149 Text output of emerge -p --debug, which can be processed elsewhere. |
148 """ | 150 """ |
149 print "Calculating deps for package %s" % package | 151 print "Calculating deps for package %s" % package |
150 cmdline = EmergeCommand() + " -p --debug --color=n " + package | 152 cmdline = (EmergeCommand() + " -p --debug --color=n --with-bdeps=y " + |
| 153 "--selective=n " + package) |
| 154 if OPTS["workon"]: |
| 155 cmdline += " " + " ".join(OPTS["workon"]) |
151 print "+ %s" % cmdline | 156 print "+ %s" % cmdline |
152 | 157 |
153 # Store output in a temp file as it is too big for a unix pipe. | 158 # Store output in a temp file as it is too big for a unix pipe. |
154 stderr_buffer = tempfile.TemporaryFile() | 159 stderr_buffer = tempfile.TemporaryFile() |
155 stdout_buffer = tempfile.TemporaryFile() | 160 stdout_buffer = tempfile.TemporaryFile() |
156 # Launch the subprocess. | 161 # Launch the subprocess. |
157 start = time.time() | 162 start = time.time() |
158 depsproc = subprocess.Popen(shlex.split(cmdline), stderr=stderr_buffer, | 163 depsproc = subprocess.Popen(shlex.split(str(cmdline)), stderr=stderr_buffer, |
159 stdout=stdout_buffer, bufsize=64*1024) | 164 stdout=stdout_buffer, bufsize=64*1024) |
160 depsproc.wait() | 165 depsproc.wait() |
161 seconds = time.time() - start | 166 seconds = time.time() - start |
162 print "Deps calculated in %d:%04.1fs" % (seconds / 60, seconds % 60) | 167 print "Deps calculated in %dm%.1fs" % (seconds / 60, seconds % 60) |
163 stderr_buffer.seek(0) | 168 stderr_buffer.seek(0) |
164 stderr_raw = stderr_buffer.read() | 169 stderr_raw = stderr_buffer.read() |
165 info_start = stderr_raw.find("digraph") | 170 info_start = stderr_raw.find("digraph") |
166 stdout_buffer.seek(0) | 171 stdout_buffer.seek(0) |
167 stdout_raw = stdout_buffer.read() | 172 stdout_raw = stdout_buffer.read() |
168 lines = [] | 173 lines = [] |
169 if info_start != -1: | 174 if info_start != -1: |
170 lines = stderr_raw[info_start:].split("\n") | 175 lines = stderr_raw[info_start:].split("\n") |
171 lines.extend(stdout_raw.split("\n")) | 176 lines.extend(stdout_raw.split("\n")) |
172 if VERBOSE or depsproc.returncode != 0: | 177 if VERBOSE or depsproc.returncode != 0: |
(...skipping 79 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... |
252 ", " + deptype + ")") | 257 ", " + deptype + ")") |
253 | 258 |
254 # Add our new package into the tree, if it's not already there. | 259 # Add our new package into the tree, if it's not already there. |
255 updatedep.setdefault(fullpkg, {}) | 260 updatedep.setdefault(fullpkg, {}) |
256 # Add an empty deps for this new package. | 261 # Add an empty deps for this new package. |
257 updatedep[fullpkg].setdefault("deps", {}) | 262 updatedep[fullpkg].setdefault("deps", {}) |
258 # Add the action we should take (merge, nomerge). | 263 # Add the action we should take (merge, nomerge). |
259 updatedep[fullpkg].setdefault("action", doins) | 264 updatedep[fullpkg].setdefault("action", doins) |
260 # Add the type of dep. | 265 # Add the type of dep. |
261 updatedep[fullpkg].setdefault("deptype", deptype) | 266 updatedep[fullpkg].setdefault("deptype", deptype) |
| 267 # Add the long name of the package |
| 268 updatedep[fullpkg].setdefault("pkgpath", "%s/%s" % (pkgdir, pkgname)) |
| 269 # Add the short name of the package |
| 270 updatedep[fullpkg].setdefault("pkgname", pkgname) |
262 | 271 |
263 # Drop any stack entries below our depth. | 272 # Drop any stack entries below our depth. |
264 deps_stack = deps_stack[0:depth] | 273 deps_stack = deps_stack[0:depth] |
265 # Add ourselves to the end of the stack. | 274 # Add ourselves to the end of the stack. |
266 deps_stack.append(fullpkg) | 275 deps_stack.append(fullpkg) |
267 elif m_orig: | 276 elif m_orig: |
268 # Also capture "pseudo packages", which are the freeform test | 277 # Also capture "pseudo packages", which are the freeform test |
269 # we requested to be installed. These are generic package names | 278 # we requested to be installed. These are generic package names |
270 # like "chromeos" rather than chromeos/chromeos-0.0.1 | 279 # like "chromeos" rather than chromeos/chromeos-0.0.1 |
271 depth = 0 | 280 depth = 0 |
272 # Tag these with "original" in case they overlap with real packages. | 281 # Tag these with "original" in case they overlap with real packages. |
273 pkgname = "original-%s" % m_orig.group("pkgname") | 282 pkgname = "original-%s" % m_orig.group("pkgname") |
274 # Insert this into the deps tree so we can stick it in "world" | 283 # Insert this into the deps tree so we can stick it in "world" |
275 updatedep = deps_tree | 284 updatedep = deps_tree |
276 for i in range(0, depth): | 285 for i in range(0, depth): |
277 updatedep = updatedep[deps_stack[i]]["deps"] | 286 updatedep = updatedep[deps_stack[i]]["deps"] |
278 if VERBOSE: | 287 if VERBOSE: |
279 print pkgname | 288 print pkgname |
280 # Add our new package into the tree, if it's not already there. | 289 # Add our new package into the tree, if it's not already there. |
281 updatedep.setdefault(pkgname, {}) | 290 updatedep.setdefault(pkgname, {}) |
282 updatedep[pkgname].setdefault("deps", {}) | 291 updatedep[pkgname].setdefault("deps", {}) |
283 # Add the type of dep. | 292 # Add the type of dep. |
284 updatedep[pkgname].setdefault("action", "world") | 293 updatedep[pkgname].setdefault("action", "world") |
285 updatedep[pkgname].setdefault("deptype", "normal") | 294 updatedep[pkgname].setdefault("deptype", "normal") |
| 295 updatedep[pkgname].setdefault("pkgpath", None) |
| 296 updatedep[pkgname].setdefault("pkgname", None) |
286 | 297 |
287 # Drop any obsolete stack entries. | 298 # Drop any obsolete stack entries. |
288 deps_stack = deps_stack[0:depth] | 299 deps_stack = deps_stack[0:depth] |
289 # Add ourselves to the end of the stack. | 300 # Add ourselves to the end of the stack. |
290 deps_stack.append(pkgname) | 301 deps_stack.append(pkgname) |
291 elif m_installed: | 302 elif m_installed: |
292 pkgname = m_installed.group("pkgname") | 303 pkgname = m_installed.group("pkgname") |
293 pkgdir = m_installed.group("pkgdir") | 304 pkgdir = m_installed.group("pkgdir") |
294 version = m_installed.group("version") | 305 version = m_installed.group("version") |
295 oldversion = m_installed.group("oldversion") | 306 oldversion = m_installed.group("oldversion") |
296 desc = m_installed.group("desc") | 307 desc = m_installed.group("desc") |
297 uninstall = False | 308 uninstall = False |
298 if oldversion and (desc.find("U") != -1 or desc.find("D") != -1): | 309 if oldversion and (desc.find("U") != -1 or desc.find("D") != -1): |
299 uninstall = True | 310 uninstall = True |
| 311 replace = desc.find("R") != -1 |
300 fullpkg = "%s/%s-%s" % (pkgdir, pkgname, version) | 312 fullpkg = "%s/%s-%s" % (pkgdir, pkgname, version) |
301 deps_info[fullpkg] = {"idx": len(deps_info), | 313 deps_info[fullpkg] = {"idx": len(deps_info), |
302 "pkgdir": pkgdir, | 314 "pkgdir": pkgdir, |
303 "pkgname": pkgname, | 315 "pkgname": pkgname, |
304 "oldversion": oldversion, | 316 "oldversion": oldversion, |
305 "uninstall": uninstall} | 317 "uninstall": uninstall, |
| 318 "replace": replace} |
306 else: | 319 else: |
307 # Is this a package that failed to match our huge regex? | 320 # Is this a package that failed to match our huge regex? |
308 m = re_failed.match(line) | 321 m = re_failed.match(line) |
309 if m: | 322 if m: |
310 print "\n".join(lines) | 323 print "\n".join(lines) |
311 print "FAIL: Couldn't understand line:" | 324 print "FAIL: Couldn't understand line:" |
312 print line | 325 print line |
313 sys.exit(1) | 326 sys.exit(1) |
314 | 327 |
315 return deps_tree, deps_info | 328 return deps_tree, deps_info |
316 | 329 |
317 | 330 |
318 def PrintTree(deps, depth=""): | 331 def PrintTree(deps, depth=""): |
319 """Print the deps we have seen in the emerge output. | 332 """Print the deps we have seen in the emerge output. |
320 | 333 |
321 Args: | 334 Args: |
322 deps: Dependency tree structure. | 335 deps: Dependency tree structure. |
323 depth: Allows printing the tree recursively, with indentation. | 336 depth: Allows printing the tree recursively, with indentation. |
324 """ | 337 """ |
325 for entry in deps: | 338 for entry in deps: |
326 action = deps[entry]["action"] | 339 action = deps[entry]["action"] |
327 print "%s %s (%s)" % (depth, entry, action) | 340 print "%s %s (%s)" % (depth, entry, action) |
328 PrintTree(deps[entry]["deps"], depth=depth + " ") | 341 PrintTree(deps[entry]["deps"], depth=depth + " ") |
329 | 342 |
330 | 343 |
331 def GenDependencyGraph(deps_tree, deps_info): | 344 def GenDependencyGraph(deps_tree, deps_info, package_names): |
332 """Generate a doubly linked dependency graph. | 345 """Generate a doubly linked dependency graph. |
333 | 346 |
334 Args: | 347 Args: |
335 deps_tree: Dependency tree structure. | 348 deps_tree: Dependency tree structure. |
336 deps_info: More details on the dependencies. | 349 deps_info: More details on the dependencies. |
| 350 package_names: Names of packages to add to the world file. |
337 Returns: | 351 Returns: |
338 Deps graph in the form of a dict of packages, with each package | 352 Deps graph in the form of a dict of packages, with each package |
339 specifying a "needs" list and "provides" list. | 353 specifying a "needs" list and "provides" list. |
340 """ | 354 """ |
341 deps_map = {} | 355 deps_map = {} |
| 356 pkgpaths = {} |
342 | 357 |
343 def ReverseTree(packages): | 358 def ReverseTree(packages): |
344 """Convert tree to digraph. | 359 """Convert tree to digraph. |
345 | 360 |
346 Take the tree of package -> requirements and reverse it to a digraph of | 361 Take the tree of package -> requirements and reverse it to a digraph of |
347 buildable packages -> packages they unblock. | 362 buildable packages -> packages they unblock. |
348 Args: | 363 Args: |
349 packages: Tree(s) of dependencies. | 364 packages: Tree(s) of dependencies. |
350 Returns: | 365 Returns: |
351 Unsanitized digraph. | 366 Unsanitized digraph. |
352 """ | 367 """ |
353 for pkg in packages: | 368 for pkg in packages: |
354 action = packages[pkg]["action"] | 369 action = packages[pkg]["action"] |
| 370 pkgpath = packages[pkg]["pkgpath"] |
| 371 pkgname = packages[pkg]["pkgname"] |
| 372 pkgpaths[pkgpath] = pkg |
| 373 pkgpaths[pkgname] = pkg |
355 this_pkg = deps_map.setdefault( | 374 this_pkg = deps_map.setdefault( |
356 pkg, {"needs": set(), "provides": set(), "action": "nomerge"}) | 375 pkg, {"needs": {}, "provides": set(), "action": "nomerge", |
| 376 "workon": False, "cmdline": False}) |
357 if action != "nomerge": | 377 if action != "nomerge": |
358 this_pkg["action"] = action | 378 this_pkg["action"] = action |
359 this_pkg["deps_info"] = deps_info.get(pkg) | 379 this_pkg["deps_info"] = deps_info.get(pkg) |
360 ReverseTree(packages[pkg]["deps"]) | 380 ReverseTree(packages[pkg]["deps"]) |
361 for dep, dep_item in packages[pkg]["deps"].items(): | 381 for dep, dep_item in packages[pkg]["deps"].items(): |
362 dep_pkg = deps_map[dep] | 382 dep_pkg = deps_map[dep] |
363 dep_type = dep_item["deptype"] | 383 dep_type = dep_item["deptype"] |
364 if dep_type != "(runtime_post)": | 384 if dep_type != "(runtime_post)": |
365 dep_pkg["provides"].add(pkg) | 385 dep_pkg["provides"].add(pkg) |
366 this_pkg["needs"].add(dep) | 386 this_pkg["needs"][dep] = dep_type |
367 | 387 |
368 def RemoveInstalledPackages(): | 388 def RemoveInstalledPackages(): |
369 """Remove installed packages, propagating dependencies.""" | 389 """Remove installed packages, propagating dependencies.""" |
370 | 390 |
| 391 if "--selective" in EMERGE_OPTS: |
| 392 selective = EMERGE_OPTS["--selective"] != "n" |
| 393 else: |
| 394 selective = "--noreplace" in EMERGE_OPTS or "--update" in EMERGE_OPTS |
371 rm_pkgs = set(deps_map.keys()) - set(deps_info.keys()) | 395 rm_pkgs = set(deps_map.keys()) - set(deps_info.keys()) |
| 396 for pkg, info in deps_info.items(): |
| 397 if selective and not deps_map[pkg]["workon"] and info["replace"]: |
| 398 rm_pkgs.add(pkg) |
372 for pkg in rm_pkgs: | 399 for pkg in rm_pkgs: |
373 this_pkg = deps_map[pkg] | 400 this_pkg = deps_map[pkg] |
| 401 if this_pkg["cmdline"] and "--oneshot" not in EMERGE_OPTS: |
| 402 # If "cmdline" is set, this is a world update that was passed on the |
| 403 # command-line. Keep these unless we're in --oneshot mode. |
| 404 continue |
374 needs = this_pkg["needs"] | 405 needs = this_pkg["needs"] |
375 provides = this_pkg["provides"] | 406 provides = this_pkg["provides"] |
376 for dep in needs: | 407 for dep in needs: |
377 dep_provides = deps_map[dep]["provides"] | 408 dep_provides = deps_map[dep]["provides"] |
378 dep_provides.update(provides) | 409 dep_provides.update(provides) |
379 dep_provides.discard(pkg) | 410 dep_provides.discard(pkg) |
380 dep_provides.discard(dep) | 411 dep_provides.discard(dep) |
381 for target in provides: | 412 for target in provides: |
382 target_needs = deps_map[target]["needs"] | 413 target_needs = deps_map[target]["needs"] |
383 target_needs.update(needs) | 414 target_needs.update(needs) |
384 target_needs.discard(pkg) | 415 if pkg in target_needs: |
385 target_needs.discard(target) | 416 del target_needs[pkg] |
| 417 if target in target_needs: |
| 418 del target_needs[target] |
386 del deps_map[pkg] | 419 del deps_map[pkg] |
387 | 420 |
388 def SanitizeDep(basedep, currdep, oldstack, limit): | 421 def SanitizeDep(basedep, currdep, visited, cycle): |
389 """Search for circular deps between basedep and currdep, then recurse. | 422 """Search for circular deps between basedep and currdep, then recurse. |
390 | 423 |
391 Args: | 424 Args: |
392 basedep: Original dependency, top of stack. | 425 basedep: Original dependency, top of stack. |
393 currdep: Bottom of our current recursion, bottom of stack. | 426 currdep: Bottom of our current recursion, bottom of stack. |
394 oldstack: Current dependency chain. | 427 visited: Nodes visited so far. |
395 limit: How many more levels of recusion to go through, max. | 428 cycle: Array where cycle of circular dependencies should be stored. |
396 TODO(): Break RDEPEND preferentially. | 429 TODO(): Break RDEPEND preferentially. |
397 Returns: | 430 Returns: |
398 True iff circular dependencies are found. | 431 True iff circular dependencies are found. |
399 """ | 432 """ |
400 if limit == 0: | 433 if currdep not in visited: |
401 return | 434 visited.add(currdep) |
402 for dep in deps_map[currdep]["needs"]: | 435 for dep in deps_map[currdep]["needs"]: |
403 stack = oldstack + [dep] | 436 if dep == basedep or SanitizeDep(basedep, dep, visited, cycle): |
404 if basedep in deps_map[dep]["needs"] or dep == basedep: | 437 cycle.insert(0, dep) |
405 if dep != basedep: | 438 return True |
406 stack += [basedep] | 439 return False |
407 print "Remove cyclic dependency from:" | |
408 for i in xrange(0, len(stack) - 1): | |
409 print " %s -> %s " % (stack[i], stack[i+1]) | |
410 return True | |
411 if dep not in oldstack and SanitizeDep(basedep, dep, stack, limit - 1): | |
412 return True | |
413 return | |
414 | 440 |
415 def SanitizeTree(): | 441 def SanitizeTree(): |
416 """Remove circular dependencies up to cycle length 32.""" | 442 """Remove circular dependencies.""" |
417 start = time.time() | 443 start = time.time() |
418 for basedep in deps_map: | 444 for basedep in deps_map: |
419 for dep in deps_map[basedep]["needs"].copy(): | 445 this_pkg = deps_map[basedep] |
420 if deps_info[basedep]["idx"] <= deps_info[dep]["idx"]: | 446 if this_pkg["action"] == "world": |
421 if SanitizeDep(basedep, dep, [basedep, dep], 31): | 447 # world file updates can't be involved in cycles, |
422 print "Breaking", basedep, " -> ", dep | 448 # and they don't have deps_info, so skip them. |
423 deps_map[basedep]["needs"].remove(dep) | 449 continue |
424 deps_map[dep]["provides"].remove(basedep) | 450 for dep in this_pkg["needs"].copy(): |
| 451 cycle = [] |
| 452 if (deps_info[basedep]["idx"] <= deps_info[dep]["idx"] and |
| 453 SanitizeDep(basedep, dep, set(), cycle)): |
| 454 cycle[:0] = [basedep, dep] |
| 455 print "Breaking cycle:" |
| 456 for i in range(len(cycle) - 1): |
| 457 deptype = deps_map[cycle[i]]["needs"][cycle[i+1]] |
| 458 print " %s -> %s %s" % (cycle[i], cycle[i+1], deptype) |
| 459 del this_pkg["needs"][dep] |
| 460 deps_map[dep]["provides"].remove(basedep) |
425 seconds = time.time() - start | 461 seconds = time.time() - start |
426 print "Tree sanitized in %d:%04.1fs" % (seconds / 60, seconds % 60) | 462 print "Tree sanitized in %d:%04.1fs" % (seconds / 60, seconds % 60) |
427 | 463 |
428 def AddSecretDeps(): | 464 def AddSecretDeps(): |
429 """Find these tagged packages and add extra dependencies. | 465 """Find these tagged packages and add extra dependencies. |
430 | 466 |
431 For debugging dependency problems. | 467 For debugging dependency problems. |
432 """ | 468 """ |
433 for bad in secret_deps: | 469 for bad in secret_deps: |
434 needed = secret_deps[bad] | 470 needed = secret_deps[bad] |
435 bad_pkg = None | 471 bad_pkg = None |
436 needed_pkg = None | 472 needed_pkg = None |
437 for dep in deps_map: | 473 for dep in deps_map: |
438 if dep.find(bad) != -1: | 474 if dep.find(bad) != -1: |
439 bad_pkg = dep | 475 bad_pkg = dep |
440 if dep.find(needed) != -1: | 476 if dep.find(needed) != -1: |
441 needed_pkg = dep | 477 needed_pkg = dep |
442 if bad_pkg and needed_pkg: | 478 if bad_pkg and needed_pkg: |
443 deps_map[needed_pkg]["provides"].add(bad_pkg) | 479 deps_map[needed_pkg]["provides"].add(bad_pkg) |
444 deps_map[bad_pkg]["needs"].add(needed_pkg) | 480 deps_map[bad_pkg]["needs"].add(needed_pkg) |
445 | 481 |
| 482 def WorkOnChildren(pkg): |
| 483 """Mark this package and all packages it provides as workon packages.""" |
| 484 |
| 485 this_pkg = deps_map[pkg] |
| 486 if this_pkg["workon"]: |
| 487 return False |
| 488 |
| 489 this_pkg["workon"] = True |
| 490 updated = False |
| 491 for w in this_pkg["provides"]: |
| 492 if WorkOnChildren(w): |
| 493 updated = True |
| 494 |
| 495 if this_pkg["action"] == "nomerge": |
| 496 pkgpath = deps_tree[pkg]["pkgpath"] |
| 497 if pkgpath is not None: |
| 498 OPTS["workon"].add(pkgpath) |
| 499 updated = True |
| 500 |
| 501 return updated |
| 502 |
446 ReverseTree(deps_tree) | 503 ReverseTree(deps_tree) |
447 AddSecretDeps() | 504 AddSecretDeps() |
| 505 |
| 506 if "no-workon-deps" in OPTS: |
| 507 for pkgpath in OPTS["workon"].copy(): |
| 508 pkg = pkgpaths[pkgpath] |
| 509 deps_map[pkg]["workon"] = True |
| 510 else: |
| 511 mergelist_updated = False |
| 512 for pkgpath in OPTS["workon"].copy(): |
| 513 pkg = pkgpaths[pkgpath] |
| 514 if WorkOnChildren(pkg): |
| 515 mergelist_updated = True |
| 516 if mergelist_updated: |
| 517 print "List of packages to merge updated. Recalculate dependencies..." |
| 518 return None |
| 519 |
| 520 for pkgpath in package_names: |
| 521 dep_pkg = deps_map.get("original-%s" % pkgpath) |
| 522 if dep_pkg and len(dep_pkg["needs"]) == 1: |
| 523 dep_pkg["cmdline"] = True |
| 524 |
448 RemoveInstalledPackages() | 525 RemoveInstalledPackages() |
449 SanitizeTree() | 526 SanitizeTree() |
450 return deps_map | 527 return deps_map |
451 | 528 |
452 | 529 |
453 def PrintDepsMap(deps_map): | 530 def PrintDepsMap(deps_map): |
454 """Print dependency graph, for each package list it's prerequisites.""" | 531 """Print dependency graph, for each package list it's prerequisites.""" |
455 for i in deps_map: | 532 for i in deps_map: |
456 print "%s: (%s) needs" % (i, deps_map[i]["action"]) | 533 print "%s: (%s) needs" % (i, deps_map[i]["action"]) |
457 for j in deps_map[i]["needs"]: | 534 for j in deps_map[i]["needs"]: |
(...skipping 12 matching lines...) Expand all Loading... |
470 install_jobs = [x for x in deps_map if deps_map[x]["action"] == "merge"] | 547 install_jobs = [x for x in deps_map if deps_map[x]["action"] == "merge"] |
471 self._total_jobs = len(install_jobs) | 548 self._total_jobs = len(install_jobs) |
472 | 549 |
473 # Initialize the ready queue, these are jobs with no unmet dependencies. | 550 # Initialize the ready queue, these are jobs with no unmet dependencies. |
474 self._emerge_queue = [x for x in deps_map if not deps_map[x]["needs"]] | 551 self._emerge_queue = [x for x in deps_map if not deps_map[x]["needs"]] |
475 # Initialize the failed queue to empty. | 552 # Initialize the failed queue to empty. |
476 self._retry_queue = [] | 553 self._retry_queue = [] |
477 self._failed = {} | 554 self._failed = {} |
478 | 555 |
479 def _LoadAvg(self): | 556 def _LoadAvg(self): |
480 loads = open('/proc/loadavg', 'r').readline().split()[:3] | 557 loads = open("/proc/loadavg", "r").readline().split()[:3] |
481 return ' '.join(loads) | 558 return " ".join(loads) |
482 | 559 |
483 def _Status(self): | 560 def _Status(self): |
484 """Print status.""" | 561 """Print status.""" |
485 seconds = time.time() - GLOBAL_START | 562 seconds = time.time() - GLOBAL_START |
486 print "Pending %s, Ready %s, Running %s, Retrying %s, Total %s " \ | 563 line = ("Pending %s, Ready %s, Running %s, Retrying %s, Total %s " |
487 "[Time %dm%ds Load %s]" % ( | 564 "[Time %dm%ds Load %s]") |
488 len(self._deps_map), len(self._emerge_queue), | 565 print line % (len(self._deps_map), len(self._emerge_queue), |
489 len(self._jobs), len(self._retry_queue), self._total_jobs, | 566 len(self._jobs), len(self._retry_queue), self._total_jobs, |
490 seconds / 60, seconds % 60, self._LoadAvg()) | 567 seconds / 60, seconds % 60, self._LoadAvg()) |
491 | 568 |
492 def _LaunchOneEmerge(self, target): | 569 def _LaunchOneEmerge(self, target): |
493 """Run emerge --nodeps to do a single package install. | 570 """Run emerge --nodeps to do a single package install. |
494 | 571 |
495 If this is a pseudopackage, that means we're done, and can select it in the | 572 If this is a pseudopackage, that means we're done, and can select it in the |
496 world file. | 573 world file. |
497 Args: | 574 Args: |
498 target: The full package name of the package to install. | 575 target: The full package name of the package to install. |
499 eg. "sys-apps/portage-2.17" | 576 eg. "sys-apps/portage-2.17" |
500 Returns: | 577 Returns: |
501 Triplet containing (target name, subprocess object, output buffer object). | 578 Triplet containing (target name, subprocess object, output buffer object). |
502 """ | 579 """ |
503 if target.startswith("original-"): | 580 if target.startswith("original-"): |
504 # "original-" signifies one of the packages we originally requested. | 581 # "original-" signifies one of the packages we originally requested. |
505 # Since we have explicitly installed the versioned package as a dep of | 582 # Since we have explicitly installed the versioned package as a dep of |
506 # this, we only need to tag in "world" that we are done with this | 583 # this, we only need to tag in "world" that we are done with this |
507 # install request. "--select -n" indicates an addition to "world" | 584 # install request. |
508 # without an actual install. | 585 # --nodeps: Ignore dependencies -- we handle them internally. |
| 586 # --noreplace: Don't replace or upgrade any packages. (In this case, the |
| 587 # package is already installed, so we are just updating the |
| 588 # world file.) |
| 589 # --selective: Make sure that --noreplace sticks even if --selective=n is |
| 590 # specified by the user on the command-line. |
| 591 # NOTE: If the user specifies --oneshot on the command-line, this command |
| 592 # will do nothing. That is desired, since the user requested not to |
| 593 # update the world file. |
509 newtarget = target.replace("original-", "") | 594 newtarget = target.replace("original-", "") |
510 cmdline = EmergeCommand() + " --nodeps --select --noreplace " + newtarget | 595 cmdline = (EmergeCommand() + " --nodeps --selective --noreplace " + |
| 596 newtarget) |
511 else: | 597 else: |
512 # This package is a dependency of something we specifically | 598 # This package is a dependency of something we specifically |
513 # requested. Therefore we should install it but not allow it | 599 # requested. Therefore we should install it but not allow it |
514 # in the "world" file, which represents explicit intalls. | 600 # in the "world" file, which represents explicit installs. |
515 # "--oneshot" here will prevent it from being tagged in world. | 601 # --oneshot" here will prevent it from being tagged in world. |
516 cmdline = EmergeCommand() + " --nodeps --oneshot =" + target | 602 cmdline = EmergeCommand() + " --nodeps --oneshot " |
517 deps_info = self._deps_map[target]["deps_info"] | 603 this_pkg = self._deps_map[target] |
| 604 if this_pkg["workon"]: |
| 605 # --usepkg=n --getbinpkg=n: Build from source |
| 606 # --selective=n: Re-emerge even if package is already installed. |
| 607 cmdline += "--usepkg=n --getbinpkg=n --selective=n " |
| 608 cmdline += "=" + target |
| 609 deps_info = this_pkg["deps_info"] |
518 if deps_info["uninstall"]: | 610 if deps_info["uninstall"]: |
519 package = "%(pkgdir)s/%(pkgname)s-%(oldversion)s" % deps_info | 611 package = "%(pkgdir)s/%(pkgname)s-%(oldversion)s" % deps_info |
520 cmdline += " && %s -1C =%s" % (EmergeCommand(), package) | 612 cmdline += " && %s -C =%s" % (EmergeCommand(), package) |
521 | 613 |
522 print "+ %s" % cmdline | 614 print "+ %s" % cmdline |
523 | 615 |
524 # Store output in a temp file as it is too big for a unix pipe. | 616 # Store output in a temp file as it is too big for a unix pipe. |
525 stdout_buffer = tempfile.TemporaryFile() | 617 stdout_buffer = tempfile.TemporaryFile() |
526 # Modify the environment to disable locking. | 618 # Modify the environment to disable locking. |
527 portage_env = os.environ.copy() | 619 portage_env = os.environ.copy() |
528 portage_env["PORTAGE_LOCKS"] = "false" | 620 portage_env["PORTAGE_LOCKS"] = "false" |
529 portage_env["UNMERGE_DELAY"] = "0" | 621 portage_env["UNMERGE_DELAY"] = "0" |
530 # Autoclean rummages around in the portage database and uninstalls | 622 # Autoclean rummages around in the portage database and uninstalls |
531 # old packages. It's not parallel safe, so we skip it. Instead, we | 623 # old packages. It's not parallel safe, so we skip it. Instead, we |
532 # handle the cleaning ourselves by uninstalling old versions of any | 624 # handle the cleaning ourselves by uninstalling old versions of any |
533 # new packages we install. | 625 # new packages we install. |
534 if not AUTOCLEAN: | 626 if not AUTOCLEAN: |
535 portage_env["AUTOCLEAN"] = "no" | 627 portage_env["AUTOCLEAN"] = "no" |
536 # Launch the subprocess. | 628 # Launch the subprocess. |
537 emerge_proc = subprocess.Popen( | 629 emerge_proc = subprocess.Popen( |
538 cmdline, shell=True, stdout=stdout_buffer, | 630 cmdline, shell=True, stdout=stdout_buffer, |
539 stderr=subprocess.STDOUT, bufsize=64*1024, env=portage_env) | 631 stderr=subprocess.STDOUT, bufsize=64*1024, env=portage_env) |
540 | 632 |
541 return (target, emerge_proc, stdout_buffer) | 633 return (target, emerge_proc, stdout_buffer) |
542 | 634 |
543 def _Finish(self, target): | 635 def _Finish(self, target): |
544 """Mark a target as completed and unblock dependecies.""" | 636 """Mark a target as completed and unblock dependecies.""" |
545 for dep in self._deps_map[target]["provides"]: | 637 for dep in self._deps_map[target]["provides"]: |
546 self._deps_map[dep]["needs"].remove(target) | 638 del self._deps_map[dep]["needs"][target] |
547 if not self._deps_map[dep]["needs"]: | 639 if not self._deps_map[dep]["needs"]: |
548 if VERBOSE: | 640 if VERBOSE: |
549 print "Unblocking %s" % dep | 641 print "Unblocking %s" % dep |
550 self._emerge_queue.append(dep) | 642 self._emerge_queue.append(dep) |
551 self._deps_map.pop(target) | 643 self._deps_map.pop(target) |
552 | 644 |
553 def _Retry(self): | 645 def _Retry(self): |
554 if self._retry_queue: | 646 if self._retry_queue: |
555 target = self._retry_queue.pop(0) | 647 target = self._retry_queue.pop(0) |
556 self._emerge_queue.append(target) | 648 self._emerge_queue.append(target) |
557 print "Retrying emerge of %s." % target | 649 print "Retrying emerge of %s." % target |
558 | 650 |
559 def Run(self): | 651 def Run(self): |
560 """Run through the scheduled ebuilds. | 652 """Run through the scheduled ebuilds. |
561 | 653 |
562 Keep running so long as we have uninstalled packages in the | 654 Keep running so long as we have uninstalled packages in the |
563 dependency graph to merge. | 655 dependency graph to merge. |
564 """ | 656 """ |
565 secs = 0 | 657 secs = 0 |
| 658 max_jobs = EMERGE_OPTS.get("--jobs", 256) |
566 while self._deps_map: | 659 while self._deps_map: |
567 # If we have packages that are ready, kick them off. | 660 # If we have packages that are ready, kick them off. |
568 if self._emerge_queue and len(self._jobs) < JOBS: | 661 if self._emerge_queue and len(self._jobs) < max_jobs: |
569 target = self._emerge_queue.pop(0) | 662 target = self._emerge_queue.pop(0) |
570 action = self._deps_map[target]["action"] | 663 action = self._deps_map[target]["action"] |
571 # We maintain a tree of all deps, if this doesn't need | 664 # We maintain a tree of all deps, if this doesn't need |
572 # to be installed just free up it's children and continue. | 665 # to be installed just free up it's children and continue. |
573 # It is possible to reinstall deps of deps, without reinstalling | 666 # It is possible to reinstall deps of deps, without reinstalling |
574 # first level deps, like so: | 667 # first level deps, like so: |
575 # chromeos (merge) -> eselect (nomerge) -> python (merge) | 668 # chromeos (merge) -> eselect (nomerge) -> python (merge) |
576 if action == "nomerge": | 669 if action == "nomerge": |
577 self._Finish(target) | 670 self._Finish(target) |
578 else: | 671 else: |
(...skipping 67 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... |
646 | 739 |
647 print "Completed %s" % target | 740 print "Completed %s" % target |
648 # Mark as completed and unblock waiting ebuilds. | 741 # Mark as completed and unblock waiting ebuilds. |
649 self._Finish(target) | 742 self._Finish(target) |
650 | 743 |
651 # Print an update. | 744 # Print an update. |
652 self._Status() | 745 self._Status() |
653 | 746 |
654 | 747 |
# Main control code.
# ParseArgs splits argv into parallel_emerge options, an optional emerge
# action, emerge options, and the package atoms/files to build.
OPTS, EMERGE_ACTION, EMERGE_OPTS, EMERGE_FILES = ParseArgs(sys.argv)

if EMERGE_ACTION is not None:
  # An emerge action (e.g. --depclean) was requested; actions are not
  # parallelized here, so pass straight through to a single emerge run.
  EMERGE_OPTS["--%s" % EMERGE_ACTION] = True
  sys.exit(os.system(EmergeCommand()))
elif not EMERGE_FILES:
  # No action and no packages: nothing to do, show usage and bail.
  Usage()
  sys.exit(1)

print "Starting fast-emerge."
print "  Building package %s on %s" % (" ".join(EMERGE_FILES),
                                       OPTS.get("board", "root"))

# If the user supplied the --workon option, we may have to run emerge twice
# to generate a dependency ordering for packages that depend on the workon
# packages.
for it in range(2):
  print "Running emerge to generate deps"
  deps_output = GetDepsFromPortage(" ".join(EMERGE_FILES))

  print "Processing emerge output"
  dependency_tree, dependency_info = DepsToTree(deps_output)

  if VERBOSE:
    print "Print tree"
    PrintTree(dependency_tree)

  print "Generate dependency graph."
  # Returns None when a dependency cycle could not be broken; in that case
  # we retry once (the second pass may order deps differently).
  dependency_graph = GenDependencyGraph(dependency_tree, dependency_info,
                                        EMERGE_FILES)

  if dependency_graph is not None:
    break
else:
  # for/else: runs only if neither attempt produced a usable (acyclic) graph.
  print "Can't crack cycle"
  sys.exit(1)

if VERBOSE:
  PrintDepsMap(dependency_graph)

# Run the queued emerges.
scheduler = EmergeQueue(dependency_graph)
scheduler.Run()

print "Done"
684 | 795 |
OLD | NEW |