| OLD | NEW |
| 1 #!/usr/bin/python2.6 | 1 #!/usr/bin/python2.6 |
| 2 # Copyright (c) 2010 The Chromium OS Authors. All rights reserved. | 2 # Copyright (c) 2010 The Chromium OS Authors. All rights reserved. |
| 3 # Use of this source code is governed by a BSD-style license that can be | 3 # Use of this source code is governed by a BSD-style license that can be |
| 4 # found in the LICENSE file. | 4 # found in the LICENSE file. |
| 5 | 5 |
| 6 """Program to run emerge in parallel, for significant speedup. | 6 """Program to run emerge in parallel, for significant speedup. |
| 7 | 7 |
| 8 Usage: | 8 Usage: |
| 9 ./parallel_emerge --board=BOARD [emerge args] package | 9 ./parallel_emerge --board=BOARD [emerge args] package |
| 10 | 10 |
| (...skipping 13 matching lines...) |
| 24 Parallel Emerge unlocks two things during operation; here's what you | 24 Parallel Emerge unlocks two things during operation; here's what you |
| 25 must do to keep this safe: | 25 must do to keep this safe: |
| 26 * Storage dir containing binary packages. - Don't emerge new | 26 * Storage dir containing binary packages. - Don't emerge new |
| 27 packages while installing the existing ones. | 27 packages while installing the existing ones. |
| 28 * Portage database - You must not examine deps while modifying the | 28 * Portage database - You must not examine deps while modifying the |
| 29 database. Therefore you may only parallelize "-p" read only access, | 29 database. Therefore you may only parallelize "-p" read only access, |
| 30 or "--nodeps" write only access. | 30 or "--nodeps" write only access. |
| 31 Caveats: | 31 Caveats: |
| 32 * Some ebuild packages have incorrectly specified deps, and running | 32 * Some ebuild packages have incorrectly specified deps, and running |
| 33 them in parallel is more likely to bring out these failures. | 33 them in parallel is more likely to bring out these failures. |
| 34 * Portage "world" is a record of explicitly installed packages. In | |
| 35 this parallel scheme, explicitly installed packages are installed | |
| 36 twice, once for the real install, and once for world file addition. | |
| 37 * Some ebuilds (especially the build part) have complex dependencies | 34 * Some ebuilds (especially the build part) have complex dependencies |
| 38 that are not captured well by this script (it may be necessary to | 35 that are not captured well by this script (it may be necessary to |
| 39 install an old package to build, but then install a newer version | 36 install an old package to build, but then install a newer version |
| 40 of the same package for a runtime dep). This script is only | 37 of the same package for a runtime dep). |
| 41 currently stable for binpkg installs. | |
| 42 """ | 38 """ |
| 43 | 39 |
| 44 import os | 40 import os |
| 45 import re | 41 import re |
| 46 import shlex | 42 import shlex |
| 47 import subprocess | 43 import subprocess |
| 48 import sys | 44 import sys |
| 49 import tempfile | 45 import tempfile |
| 50 import time | 46 import time |
| 51 | 47 |
| 52 | 48 |
| 53 def Usage(): | 49 def Usage(): |
| 54 print "Usage:" | 50 print "Usage:" |
| 55 print " ./parallel_emerge --board=BOARD [emerge args] package" | 51 print " ./parallel_emerge --board=BOARD --jobs=JOBS [emerge args] package" |
| 56 sys.exit(1) | 52 sys.exit(1) |
| 57 | 53 |
| 58 | 54 |
| 59 # These are dependencies that are not specified in the package, | 55 # These are dependencies that are not specified in the package, |
| 60 # but will prevent the package from installing. | 56 # but will prevent the package from installing. |
| 61 secret_deps = {} | 57 secret_deps = {} |
| 62 | 58 |
| 63 # Globals: package we are building, board we are targeting, | 59 # Globals: package we are building, board we are targeting, |
| 64 # emerge args we are passing through. | 60 # emerge args we are passing through. |
| 65 PACKAGE = None | 61 PACKAGE = None |
| 66 EMERGE_ARGS = "" | 62 EMERGE_ARGS = "" |
| 67 BOARD = None | 63 BOARD = None |
| 68 | 64 |
| 69 # Runtime flags. TODO(): maybe make these commandline options or | 65 # Runtime flags. TODO(): Maybe make these command-line options or |
| 70 # environment veriables. | 66 # environment variables. |
| 71 VERBOSE = False | 67 VERBOSE = False |
| 72 AUTOCLEAN = False | 68 AUTOCLEAN = False |
| 73 | 69 |
| 74 | 70 |
| 75 def ParseArgs(argv): | 71 def ParseArgs(argv): |
| 76 """Set global vars based on command line. | 72 """Set global vars based on command line. |
| 77 | 73 |
| 78 We need to be compatible with emerge arg format. | 74 We need to be compatible with emerge arg format. |
| 79 We scrape --board-XXX, and distinguish between args | 75 We scrape --board=XXX and --jobs=XXX, and distinguish between args |
| 80 and package names. | 76 and package names. |
| 81 TODO(): robustify argument processing, as it's possible to | 77 TODO(): Robustify argument processing, as it's possible to |
| 82 pass in many two argument parameters that are difficult | 78 pass in many two argument parameters that are difficult |
| 83 to programmaitcally identify, although we don't currently | 79 to programmatically identify, although we don't currently |
| 84 use any besides --bdeps <y|n>. | 80 use any besides --with-bdeps <y|n>. |
| 85 Args: | 81 Args: |
| 86 argv: arguments list | 82 argv: arguments list |
| 87 Returns: | 83 Returns: |
| 88 triplet of (package list, emerge arguments, board string) | 84 4-tuple of (package list, emerge arguments, board string, job count) |
| 89 """ | 85 """ |
| 90 if VERBOSE: | 86 if VERBOSE: |
| 91 print argv | 87 print argv |
| 92 board_arg = None | 88 board_arg = None |
| 89 jobs_arg = 0 |
| 93 package_args = [] | 90 package_args = [] |
| 94 emerge_passthru_args = "" | 91 emerge_passthru_args = "" |
| 95 re_board = re.compile(r"--board=(?P<board>.*)") | |
| 96 for arg in argv[1:]: | 92 for arg in argv[1:]: |
| 97 # Check if the arg begins with '-' | 93 # Specifically match "--board=" and "--jobs=". |
| 98 if arg[0] == "-" or arg == "y" or arg == "n": | 94 if arg.startswith("--board="): |
| 99 # Specifically match "--board=" | 95 board_arg = arg.replace("--board=", "") |
| 100 m = re_board.match(arg) | 96 elif arg.startswith("--jobs="): |
| 101 if m: | 97 try: |
| 102 board_arg = m.group("board") | 98 jobs_arg = int(arg.replace("--jobs=", "")) |
| 103 else: | 99 except ValueError: |
| 104 # Pass through to emerge. | 100 print "Unrecognized argument:", arg |
| 105 emerge_passthru_args = emerge_passthru_args + " " + arg | 101 Usage() |
| 102 sys.exit(1) |
| 103 elif arg.startswith("-") or arg == "y" or arg == "n": |
| 104 # Not a package name, so pass through to emerge. |
| 105 emerge_passthru_args = emerge_passthru_args + " " + arg |
| 106 else: | 106 else: |
| 107 # Only non-dashed arg should be the target package. | |
| 108 package_args.append(arg) | 107 package_args.append(arg) |
| 109 | 108 |
| 110 if not package_args: | 109 if not package_args and not emerge_passthru_args: |
| 111 Usage() | 110 Usage() |
| 112 sys.exit(1) | 111 sys.exit(1) |
| 113 | 112 |
| 113 # Default to lots of jobs |
| 114 if jobs_arg <= 0: |
| 115 jobs_arg = 256 |
| 116 |
| 114 # Set globals. | 117 # Set globals. |
| 115 return " ".join(package_args), emerge_passthru_args, board_arg | 118 return " ".join(package_args), emerge_passthru_args, board_arg, jobs_arg |
| 116 | 119 |
| 117 | 120 |
| 118 def EmergeCommand(): | 121 def EmergeCommand(): |
| 119 """Helper function to return the base emerge commandline. | 122 """Helper function to return the base emerge commandline. |
| 120 | 123 |
| 121 This is configured for the board type and includes pass-thru args, | 124 This is configured for the board type and includes pass-thru args, |
| 122 using global variables. TODO(): unglobalfy. | 125 using global variables. TODO(): Unglobalfy. |
| 123 Returns: | 126 Returns: |
| 124 string containing emerge command. | 127 string containing emerge command. |
| 125 """ | 128 """ |
| 126 emerge = "emerge" | 129 emerge = "emerge" |
| 127 if BOARD: | 130 if BOARD: |
| 128 emerge += "-" + BOARD | 131 emerge += "-" + BOARD |
| 129 return emerge + " " + EMERGE_ARGS | 132 return emerge + " " + EMERGE_ARGS |
| 130 | 133 |
| 131 | 134 |
| 132 def GetDepsFromPortage(package): | 135 def GetDepsFromPortage(package): |
| 133 """Get dependency tree info by running emerge. | 136 """Get dependency tree info by running emerge. |
| 134 | 137 |
| 135 Run 'emerge -p --debug package', and get a text output of all deps. | 138 Run 'emerge -p --debug package', and get a text output of all deps. |
| 136 TODO(): Put dep caclation in a library, as cros_extract_deps | 139 TODO(): Put dep calculation in a library, as cros_extract_deps |
| 137 also uses this code. | 140 also uses this code. |
| 138 Args: | 141 Args: |
| 139 package: string containing the packages to build. | 142 package: String containing the packages to build. |
| 140 Returns: | 143 Returns: |
| 141 text output of emerge -p --debug, which can be processed elsewhere. | 144 Text output of emerge -p --debug, which can be processed elsewhere. |
| 142 """ | 145 """ |
| 143 print "Calculating deps for package %s" % package | 146 print "Calculating deps for package %s" % package |
| 144 cmdline = EmergeCommand() + " -p --debug --color=n " + package | 147 cmdline = EmergeCommand() + " -p --debug --color=n " + package |
| 145 print "+ %s" % cmdline | 148 print "+ %s" % cmdline |
| 146 | 149 |
| 147 # Store output in a temp file as it is too big for a unix pipe. | 150 # Store output in a temp file as it is too big for a unix pipe. |
| 148 stderr_buffer = tempfile.TemporaryFile() | 151 stderr_buffer = tempfile.TemporaryFile() |
| 149 stdout_buffer = tempfile.TemporaryFile() | 152 stdout_buffer = tempfile.TemporaryFile() |
| 150 # Launch the subprocess. | 153 # Launch the subprocess. |
| 151 start = time.time() | 154 start = time.time() |
| (...skipping 18 matching lines...) |
| 170 print "Failed to generate deps" | 173 print "Failed to generate deps" |
| 171 sys.exit(1) | 174 sys.exit(1) |
| 172 | 175 |
| 173 return lines | 176 return lines |
| 174 | 177 |
| 175 | 178 |
| 176 def DepsToTree(lines): | 179 def DepsToTree(lines): |
| 177 """Regex the output from 'emerge --debug' to generate a nested dict of deps. | 180 """Regex the output from 'emerge --debug' to generate a nested dict of deps. |
| 178 | 181 |
| 179 Args: | 182 Args: |
| 180 lines: output from 'emerge -p --debug package' | 183 lines: Output from 'emerge -p --debug package'. |
| 181 Returns: | 184 Returns: |
| 182 dep_tree: nested dict of dependencies, as specified by emerge. | 185 dep_tree: Nested dict of dependencies, as specified by emerge. |
| 183 there may be dupes, or circular deps. | 186 There may be dupes, or circular deps. |
| 184 | 187 |
| 185 We need to regex lines as follows: | 188 We need to regex lines as follows: |
| 186 hard-host-depends depends on | 189 hard-host-depends depends on |
| 187 ('ebuild', '/', 'dev-lang/swig-1.3.36', 'merge') depends on | 190 ('ebuild', '/', 'dev-lang/swig-1.3.36', 'merge') depends on |
| 188 ('ebuild', '/', 'dev-lang/perl-5.8.8-r8', 'merge') (buildtime) | 191 ('ebuild', '/', 'dev-lang/perl-5.8.8-r8', 'merge') (buildtime) |
| 189 ('binary', '/.../rootfs/', 'sys-auth/policykit-0.9-r1', 'merge') depends on | 192 ('binary', '/.../rootfs/', 'sys-auth/policykit-0.9-r1', 'merge') depends on |
| 190 ('binary', '/.../rootfs/', 'x11-misc/xbitmaps-1.1.0', 'merge') (no children) | 193 ('binary', '/.../rootfs/', 'x11-misc/xbitmaps-1.1.0', 'merge') (no children) |
| 191 """ | 194 """ |
| 192 | 195 |
| 193 re_deps = re.compile(r"(?P<indent>\W*)\(\'(?P<pkgtype>\w+)\', " | 196 re_deps = re.compile(r"(?P<indent>\W*)\(\'(?P<pkgtype>\w+)\', " |
| 194 r"\'(?P<destination>[\w/\.-]+)\'," | 197 r"\'(?P<destination>[\w/\.-]+)\'," |
| 195 r" \'(?P<pkgdir>[\w\+-]+)/(?P<pkgname>[\w\+-]+)-" | 198 r" \'(?P<pkgdir>[\w\+-]+)/(?P<pkgname>[\w\+-]+)-" |
| 196 r"(?P<version>\d+[\w\.-]*)\', \'(?P<action>\w+)\'\) " | 199 r"(?P<version>\d+[\w\.-]*)\', \'(?P<action>\w+)\'\) " |
| 197 r"(?P<deptype>(depends on|\(.*\)))") | 200 r"(?P<deptype>(depends on|\(.*\)))") |
| 198 re_origdeps = re.compile(r"(?P<pkgname>[\w\+/-]+) depends on") | 201 re_origdeps = re.compile(r"(?P<pkgname>[\w\+/-]+) depends on") |
| 199 re_installed_package = re.compile( | 202 re_installed_package = re.compile( |
| 200 r"\[(?P<desc>[^\]]*)\] " | 203 r"\[(?P<desc>[^\]]*)\] " |
| 201 r"(?P<pkgdir>[\w\+-]+)/" | 204 r"(?P<pkgdir>[\w\+-]+)/" |
| 202 r"(?P<pkgname>[\w\+-]+)-" | 205 r"(?P<pkgname>[\w\+-]+)-" |
| 203 r"(?P<version>\d+[\w\.-]*)( \[" | 206 r"(?P<version>\d+[\w\.-]*)( \[" |
| 204 r"(?P<oldversion>\d+[\w\.-]*)\])?" | 207 r"(?P<oldversion>\d+[\w\.-]*)\])?" |
| 205 ) | 208 ) |
| 206 re_failed = re.compile(r".*depends on.*") | 209 re_failed = re.compile(r".*\) depends on.*") |
| 207 deps_tree = {} | 210 deps_tree = {} |
| 208 deps_stack = [] | 211 deps_stack = [] |
| 209 deps_info = {} | 212 deps_info = {} |
| 210 for line in lines: | 213 for line in lines: |
| 211 m = re_deps.match(line) | 214 m = re_deps.match(line) |
| 212 m_orig = re_origdeps.match(line) | 215 m_orig = re_origdeps.match(line) |
| 213 m_installed = re_installed_package.match(line) | 216 m_installed = re_installed_package.match(line) |
| 214 if m: | 217 if m: |
| 215 pkgname = m.group("pkgname") | 218 pkgname = m.group("pkgname") |
| 216 pkgdir = m.group("pkgdir") | 219 pkgdir = m.group("pkgdir") |
| 217 pkgtype = m.group("pkgtype") | 220 pkgtype = m.group("pkgtype") |
| 218 indent = m.group("indent") | 221 indent = m.group("indent") |
| 219 doins = m.group("action") | 222 doins = m.group("action") |
| 220 deptype = m.group("deptype") | 223 deptype = m.group("deptype") |
| 221 depth = 1 | 224 depth = 1 |
| 222 if not indent: | 225 if not indent: |
| 223 depth = 0 | 226 depth = 0 |
| 224 version = m.group("version") | 227 version = m.group("version") |
| 225 | 228 |
| 226 # If we are indented, we should have | 229 # If we are indented, we should have |
| 227 # found a "depends on" previously. | 230 # found a "depends on" previously. |
| 228 if len(deps_stack) < depth: | 231 if len(deps_stack) < depth: |
| 229 print "FAIL: corrupt input at:" | 232 print "FAIL: corrupt input at:" |
| 230 print line | 233 print line |
| 231 print "No Parent." | 234 print "No Parent." |
| 232 sys.exit(1) | 235 sys.exit(1) |
| 233 | 236 |
| 234 # Go step by step through stack and tree | 237 # Go step by step through stack and tree |
| 235 # until we find our parent. Generate | 238 # until we find our parent. |
| 236 updatedep = deps_tree | 239 updatedep = deps_tree |
| 237 for i in range(0, depth): | 240 for i in range(0, depth): |
| 238 updatedep = updatedep[deps_stack[i]]["deps"] | 241 updatedep = updatedep[deps_stack[i]]["deps"] |
| 239 | 242 |
| 240 # Pretty print what we've captured. | 243 # Pretty print what we've captured. |
| 241 indent = "|" + "".ljust(depth, "_") | 244 indent = "|" + "".ljust(depth, "_") |
| 242 fullpkg = "%s/%s-%s" % (pkgdir, pkgname, version) | 245 fullpkg = "%s/%s-%s" % (pkgdir, pkgname, version) |
| 243 if VERBOSE: | 246 if VERBOSE: |
| 244 print ("" + indent + " " + pkgdir + "/" + pkgname + " - " + | 247 print ("" + indent + " " + pkgdir + "/" + pkgname + " - " + |
| 245 version + " (" + pkgtype + ", " + doins + | 248 version + " (" + pkgtype + ", " + doins + |
| (...skipping 48 matching lines...) |
| 294 fullpkg = "%s/%s-%s" % (pkgdir, pkgname, version) | 297 fullpkg = "%s/%s-%s" % (pkgdir, pkgname, version) |
| 295 deps_info[fullpkg] = {"idx": len(deps_info), | 298 deps_info[fullpkg] = {"idx": len(deps_info), |
| 296 "pkgdir": pkgdir, | 299 "pkgdir": pkgdir, |
| 297 "pkgname": pkgname, | 300 "pkgname": pkgname, |
| 298 "oldversion": oldversion, | 301 "oldversion": oldversion, |
| 299 "uninstall": uninstall} | 302 "uninstall": uninstall} |
| 300 else: | 303 else: |
| 301 # Is this a package that failed to match our huge regex? | 304 # Is this a package that failed to match our huge regex? |
| 302 m = re_failed.match(line) | 305 m = re_failed.match(line) |
| 303 if m: | 306 if m: |
| 307 print "\n".join(lines) |
| 304 print "FAIL: Couldn't understand line:" | 308 print "FAIL: Couldn't understand line:" |
| 305 print line | 309 print line |
| 306 sys.exit(1) | 310 sys.exit(1) |
| 307 | 311 |
| 308 return deps_tree, deps_info | 312 return deps_tree, deps_info |
| 309 | 313 |
| 310 | 314 |
| 311 def PrintTree(deps, depth=""): | 315 def PrintTree(deps, depth=""): |
| 312 """Print the deps we have seen in the emerge output. | 316 """Print the deps we have seen in the emerge output. |
| 313 | 317 |
| 314 Args: | 318 Args: |
| 315 deps: dependency tree structure. | 319 deps: Dependency tree structure. |
| 316 depth: allows printing the tree recursively, with indentation. | 320 depth: Allows printing the tree recursively, with indentation. |
| 317 """ | 321 """ |
| 318 for entry in deps: | 322 for entry in deps: |
| 319 action = deps[entry]["action"] | 323 action = deps[entry]["action"] |
| 320 print "%s %s (%s)" % (depth, entry, action) | 324 print "%s %s (%s)" % (depth, entry, action) |
| 321 PrintTree(deps[entry]["deps"], depth=depth + " ") | 325 PrintTree(deps[entry]["deps"], depth=depth + " ") |
| 322 | 326 |
| 323 | 327 |
| 324 def GenDependencyGraph(deps_tree, deps_info): | 328 def GenDependencyGraph(deps_tree, deps_info): |
| 325 """Generate a doubly linked dependency graph. | 329 """Generate a doubly linked dependency graph. |
| 326 | 330 |
| 327 Args: | 331 Args: |
| 328 deps_tree: dependency tree structure. | 332 deps_tree: Dependency tree structure. |
| 329 deps_info: more info on the dependencies. | 333 deps_info: More details on the dependencies. |
| 330 Returns: | 334 Returns: |
| 331 Deps graph in the form of a dict of packages, with each package | 335 Deps graph in the form of a dict of packages, with each package |
| 332 specifying a "needs" list and "provides" list. | 336 specifying a "needs" list and "provides" list. |
| 333 """ | 337 """ |
| 334 deps_map = {} | 338 deps_map = {} |
| 335 | 339 |
| 336 def ReverseTree(packages): | 340 def ReverseTree(packages): |
| 337 """Convert tree to digraph. | 341 """Convert tree to digraph. |
| 338 | 342 |
| 339 Take the tree of package -> requirements and reverse it to a digraph of | 343 Take the tree of package -> requirements and reverse it to a digraph of |
| 340 buildable packages -> packages they unblock | 344 buildable packages -> packages they unblock. |
| 341 Args: | 345 Args: |
| 342 packages: tree(s) of dependencies | 346 packages: Tree(s) of dependencies. |
| 343 Returns: | 347 Returns: |
| 344 unsanitized digraph | 348 Unsanitized digraph. |
| 345 """ | 349 """ |
| 346 for pkg in packages: | 350 for pkg in packages: |
| 347 action = packages[pkg]["action"] | 351 action = packages[pkg]["action"] |
| 348 this_pkg = deps_map.setdefault( | 352 this_pkg = deps_map.setdefault( |
| 349 pkg, {"needs": set(), "provides": set(), "action": "nomerge"}) | 353 pkg, {"needs": set(), "provides": set(), "action": "nomerge"}) |
| 350 if action != "nomerge": | 354 if action != "nomerge": |
| 351 this_pkg["action"] = action | 355 this_pkg["action"] = action |
| 352 this_pkg["deps_info"] = deps_info.get(pkg) | 356 this_pkg["deps_info"] = deps_info.get(pkg) |
| 353 ReverseTree(packages[pkg]["deps"]) | 357 ReverseTree(packages[pkg]["deps"]) |
| 354 for dep, dep_item in packages[pkg]["deps"].items(): | 358 for dep, dep_item in packages[pkg]["deps"].items(): |
| 355 dep_pkg = deps_map[dep] | 359 dep_pkg = deps_map[dep] |
| 356 dep_type = dep_item["deptype"] | 360 dep_type = dep_item["deptype"] |
| 357 if dep_type != "(runtime_post)": | 361 if dep_type != "(runtime_post)": |
| 358 dep_pkg["provides"].add(pkg) | 362 dep_pkg["provides"].add(pkg) |
| 359 this_pkg["needs"].add(dep) | 363 this_pkg["needs"].add(dep) |
| 360 | 364 |
| 361 def RemoveInstalledPackages(): | 365 def RemoveInstalledPackages(): |
| 362 """Remove installed packages, propagating dependencies""" | 366 """Remove installed packages, propagating dependencies.""" |
| 363 | 367 |
| 364 rm_pkgs = set(deps_map.keys()) - set(deps_info.keys()) | 368 rm_pkgs = set(deps_map.keys()) - set(deps_info.keys()) |
| 365 for pkg in rm_pkgs: | 369 for pkg in rm_pkgs: |
| 366 this_pkg = deps_map[pkg] | 370 this_pkg = deps_map[pkg] |
| 367 needs = this_pkg["needs"] | 371 needs = this_pkg["needs"] |
| 368 provides = this_pkg["provides"] | 372 provides = this_pkg["provides"] |
| 369 for dep in needs: | 373 for dep in needs: |
| 370 dep_provides = deps_map[dep]["provides"] | 374 dep_provides = deps_map[dep]["provides"] |
| 371 dep_provides.update(provides) | 375 dep_provides.update(provides) |
| 372 dep_provides.discard(pkg) | 376 dep_provides.discard(pkg) |
| 373 dep_provides.discard(dep) | 377 dep_provides.discard(dep) |
| 374 for target in provides: | 378 for target in provides: |
| 375 target_needs = deps_map[target]["needs"] | 379 target_needs = deps_map[target]["needs"] |
| 376 target_needs.update(needs) | 380 target_needs.update(needs) |
| 377 target_needs.discard(pkg) | 381 target_needs.discard(pkg) |
| 378 target_needs.discard(target) | 382 target_needs.discard(target) |
| 379 del deps_map[pkg] | 383 del deps_map[pkg] |
| 380 | 384 |
| 381 | |
| 382 def SanitizeDep(basedep, currdep, oldstack, limit): | 385 def SanitizeDep(basedep, currdep, oldstack, limit): |
| 383 """Search for circular deps between basedep and currdep, then recurse. | 386 """Search for circular deps between basedep and currdep, then recurse. |
| 384 | 387 |
| 385 Args: | 388 Args: |
| 386 basedep: original dependency, top of stack. | 389 basedep: Original dependency, top of stack. |
| 387 currdep: bottom of our current recursion, bottom of stack. | 390 currdep: Bottom of our current recursion, bottom of stack. |
| 388 oldstack: current dependency chain. | 391 oldstack: Current dependency chain. |
| 389 limit: how many more levels of recursion to go through, max. | 392 limit: How many more levels of recursion to go through, max. |
| 390 TODO(): Break RDEPEND preferentially. | 393 TODO(): Break RDEPEND preferentially. |
| 391 Returns: | 394 Returns: |
| 392 True iff circular dependencies are found. | 395 True iff circular dependencies are found. |
| 393 """ | 396 """ |
| 394 if limit == 0: | 397 if limit == 0: |
| 395 return | 398 return |
| 396 for dep in deps_map[currdep]["needs"]: | 399 for dep in deps_map[currdep]["needs"]: |
| 397 stack = oldstack + [dep] | 400 stack = oldstack + [dep] |
| 398 if basedep in deps_map[dep]["needs"] or dep == basedep: | 401 if basedep in deps_map[dep]["needs"] or dep == basedep: |
| 399 if dep != basedep: | 402 if dep != basedep: |
| 400 stack += [basedep] | 403 stack += [basedep] |
| 401 print "Remove cyclic dependency from:" | 404 print "Remove cyclic dependency from:" |
| 402 for i in xrange(0, len(stack) - 1): | 405 for i in xrange(0, len(stack) - 1): |
| 403 print " %s -> %s " % (stack[i], stack[i+1]) | 406 print " %s -> %s " % (stack[i], stack[i+1]) |
| 404 return True | 407 return True |
| 405 if dep not in oldstack and SanitizeDep(basedep, dep, stack, limit - 1): | 408 if dep not in oldstack and SanitizeDep(basedep, dep, stack, limit - 1): |
| 406 return True | 409 return True |
| 407 return | 410 return |
| 408 | 411 |
| 409 def SanitizeTree(): | 412 def SanitizeTree(): |
| 410 """Remove circular dependencies up to cycle length 32.""" | 413 """Remove circular dependencies up to cycle length 32.""" |
| (...skipping 64 matching lines...) |
| 475 print "Pending %s, Ready %s, Running %s, Retrying %s, Total %s" % ( | 478 print "Pending %s, Ready %s, Running %s, Retrying %s, Total %s" % ( |
| 476 len(self._deps_map), len(self._emerge_queue), | 479 len(self._deps_map), len(self._emerge_queue), |
| 477 len(self._jobs), len(self._retry_queue), self._total_jobs) | 480 len(self._jobs), len(self._retry_queue), self._total_jobs) |
| 478 | 481 |
| 479 def _LaunchOneEmerge(self, target): | 482 def _LaunchOneEmerge(self, target): |
| 480 """Run emerge --nodeps to do a single package install. | 483 """Run emerge --nodeps to do a single package install. |
| 481 | 484 |
| 482 If this is a pseudopackage, that means we're done, and can select it in the | 485 If this is a pseudopackage, that means we're done, and can select it in the |
| 483 world file. | 486 world file. |
| 484 Args: | 487 Args: |
| 485 target: the full package name of the package to install. | 488 target: The full package name of the package to install. |
| 486 e.g. "sys-apps/portage-2.17" | 489 e.g. "sys-apps/portage-2.17" |
| 487 Returns: | 490 Returns: |
| 488 triplet containing (target name, subprocess object, output buffer object) | 491 Triplet containing (target name, subprocess object, output buffer object). |
| 489 """ | 492 """ |
| 490 if target.startswith("original-"): | 493 if target.startswith("original-"): |
| 491 # "original-" signifies one of the packages we originally requested. | 494 # "original-" signifies one of the packages we originally requested. |
| 492 # Since we have explicitly installed the versioned package as a dep of | 495 # Since we have explicitly installed the versioned package as a dep of |
| 493 # this, we only need to tag in "world" that we are done with this | 496 # this, we only need to tag in "world" that we are done with this |
| 494 # install request. "--select -n" indicates an addition to "world" | 497 # install request. "--select -n" indicates an addition to "world" |
| 495 # without an actual install. | 498 # without an actual install. |
| 496 newtarget = target.replace("original-", "") | 499 newtarget = target.replace("original-", "") |
| 497 cmdline = EmergeCommand() + " --nodeps --select --noreplace " + newtarget | 500 cmdline = EmergeCommand() + " --nodeps --select --noreplace " + newtarget |
| 498 else: | 501 else: |
| 499 # This package is a dependency of something we specifically | 502 # This package is a dependency of something we specifically |
| 500 # requested. Therefore we should install it but not allow it | 503 # requested. Therefore we should install it but not allow it |
| 501 # in the "world" file, which represents explicit intalls. | 504 # in the "world" file, which represents explicit intalls. |
| 502 # "--oneshot" here will prevent it from being tagged in world. | 505 # "--oneshot" here will prevent it from being tagged in world. |
| 503 cmdline = EmergeCommand() + " --nodeps --oneshot =" + target | 506 cmdline = EmergeCommand() + " --nodeps --oneshot =" + target |
| 504 deps_info = self._deps_map[target]["deps_info"] | 507 deps_info = self._deps_map[target]["deps_info"] |
| 505 if deps_info["uninstall"]: | 508 if deps_info["uninstall"]: |
| 506 package = "%(pkgdir)s/%(pkgname)s-%(oldversion)s" % deps_info | 509 package = "%(pkgdir)s/%(pkgname)s-%(oldversion)s" % deps_info |
| 507 cmdline += " && %s -1C =%s" % (EmergeCommand(), package) | 510 cmdline += " && %s -1C =%s" % (EmergeCommand(), package) |
| 508 | 511 |
| 509 print "+ %s" % cmdline | 512 print "+ %s" % cmdline |
| 510 | 513 |
| 511 # Store output in a temp file as it is too big for a unix pipe. | 514 # Store output in a temp file as it is too big for a unix pipe. |
| 512 stdout_buffer = tempfile.TemporaryFile() | 515 stdout_buffer = tempfile.TemporaryFile() |
| 513 # Modify the environment to disable locking. | 516 # Modify the environment to disable locking. |
| 514 portage_env = os.environ.copy() | 517 portage_env = os.environ.copy() |
| 515 portage_env["PORTAGE_LOCKS"] = "false" | 518 portage_env["PORTAGE_LOCKS"] = "false" |
| 516 portage_env["UNMERGE_DELAY"] = "0" | 519 portage_env["UNMERGE_DELAY"] = "0" |
| 517 # Autoclean rummages around in the portage database and uninstalls | 520 # Autoclean rummages around in the portage database and uninstalls |
| 518 # old packages. Definitely not necessary for build_image. However | 521 # old packages. It's not parallel safe, so we skip it. Instead, we |
| 519 # it may be necessary for incremental build_packages. It may also | 522 # handle the cleaning ourselves by uninstalling old versions of any |
| 520 # not be parallel safe. | 523 # new packages we install. |
| 521 if not AUTOCLEAN: | 524 if not AUTOCLEAN: |
| 522 portage_env["AUTOCLEAN"] = "no" | 525 portage_env["AUTOCLEAN"] = "no" |
| 523 # Launch the subprocess. | 526 # Launch the subprocess. |
| 524 emerge_proc = subprocess.Popen( | 527 emerge_proc = subprocess.Popen( |
| 525 cmdline, shell=True, stdout=stdout_buffer, | 528 cmdline, shell=True, stdout=stdout_buffer, |
| 526 stderr=subprocess.STDOUT, bufsize=64*1024, env=portage_env) | 529 stderr=subprocess.STDOUT, bufsize=64*1024, env=portage_env) |
| 527 | 530 |
| 528 return (target, emerge_proc, stdout_buffer) | 531 return (target, emerge_proc, stdout_buffer) |
| 529 | 532 |
| 530 def _Finish(self, target): | 533 def _Finish(self, target): |
| (...skipping 13 matching lines...) |
| 544 print "Retrying emerge of %s." % target | 547 print "Retrying emerge of %s." % target |
| 545 | 548 |
| 546 def Run(self): | 549 def Run(self): |
| 547 """Run through the scheduled ebuilds. | 550 """Run through the scheduled ebuilds. |
| 548 | 551 |
| 549 Keep running so long as we have uninstalled packages in the | 552 Keep running so long as we have uninstalled packages in the |
| 550 dependency graph to merge. | 553 dependency graph to merge. |
| 551 """ | 554 """ |
| 552 while self._deps_map: | 555 while self._deps_map: |
| 553 # If we have packages that are ready, kick them off. | 556 # If we have packages that are ready, kick them off. |
| 554 if self._emerge_queue: | 557 if self._emerge_queue and len(self._jobs) < JOBS: |
| 555 target = self._emerge_queue.pop(0) | 558 target = self._emerge_queue.pop(0) |
| 556 action = self._deps_map[target]["action"] | 559 action = self._deps_map[target]["action"] |
| 557 # We maintain a tree of all deps; if this doesn't need | 560 # We maintain a tree of all deps; if this doesn't need |
| 558 # to be installed, just free up its children and continue. | 561 # to be installed, just free up its children and continue. |
| 559 # It is possible to reinstall deps of deps, without reinstalling | 562 # It is possible to reinstall deps of deps, without reinstalling |
| 560 # first level deps, like so: | 563 # first level deps, like so: |
| 561 # chromeos (merge) -> eselect (nomerge) -> python (merge) | 564 # chromeos (merge) -> eselect (nomerge) -> python (merge) |
| 562 if action == "nomerge": | 565 if action == "nomerge": |
| 563 self._Finish(target) | 566 self._Finish(target) |
| 564 else: | 567 else: |
| 565 # Kick off the build if it's marked to be built. | 568 # Kick off the build if it's marked to be built. |
| 566 print "Emerging %s (%s)" % (target, action) | 569 print "Emerging %s (%s)" % (target, action) |
| 567 job = self._LaunchOneEmerge(target) | 570 job = self._LaunchOneEmerge(target) |
| 568 # Append it to the active jobs list. | 571 # Append it to the active jobs list. |
| 569 self._jobs.append(job) | 572 self._jobs.append(job) |
| 570 continue | 573 continue |
| 571 # Wait a bit to see if maybe some jobs finish. You can't | 574 # Wait a bit to see if maybe some jobs finish. You can't |
| 572 # wait on a set of jobs in python, so we'll just poll. | 575 # wait on a set of jobs in python, so we'll just poll. |
| 573 time.sleep(1) | 576 time.sleep(1) |
| 574 | 577 |
| 575 # Check here that we are actually waiting for something. | 578 # Check here that we are actually waiting for something. |
| 576 if (not self._emerge_queue and | 579 if (not self._emerge_queue and |
| 577 not self._jobs and | 580 not self._jobs and |
| 578 self._deps_map): | 581 self._deps_map): |
| 579 # If we have failed on a package retry it now. | 582 # If we have failed on a package, retry it now. |
| 580 if self._retry_queue: | 583 if self._retry_queue: |
| 581 self._Retry() | 584 self._Retry() |
| 582 # If we have failed a package twice, just give up. | 585 # If we have failed a package twice, just give up. |
| 583 elif self._failed: | 586 elif self._failed: |
| 584 for failure, output in self._failed.items(): | 587 for failure, output in self._failed.items(): |
| 585 print "Package failed: %s" % failure | 588 print "Package failed: %s" % failure |
| 586 print output | 589 print output |
| 587 PrintDepsMap(self._deps_map) | 590 PrintDepsMap(self._deps_map) |
| 588 print "Packages failed: %s" % " ,".join(self._failed.keys()) | 591 print "Packages failed: %s" % " ,".join(self._failed.keys()) |
| 589 sys.exit(1) | 592 sys.exit(1) |
| (...skipping 10 matching lines...) |
| 600 # Clean up the subprocess. | 603 # Clean up the subprocess. |
| 601 job.wait() | 604 job.wait() |
| 602 # Get the output if we want to print it. | 605 # Get the output if we want to print it. |
| 603 stdout.seek(0) | 606 stdout.seek(0) |
| 604 output = stdout.read() | 607 output = stdout.read() |
| 605 | 608 |
| 606 # Remove from active jobs list, we are done with this process. | 609 # Remove from active jobs list, we are done with this process. |
| 607 self._jobs.remove((target, job, stdout)) | 610 self._jobs.remove((target, job, stdout)) |
| 608 | 611 |
| 609 # Print if necessary. | 612 # Print if necessary. |
| 610 if VERBOSE: | 613 if VERBOSE or job.returncode != 0: |
| 611 print output | 614 print output |
| 612 if job.returncode != 0: | 615 if job.returncode != 0: |
| 613 # Handle job failure. | 616 # Handle job failure. |
| 614 if target in self._failed: | 617 if target in self._failed: |
| 615 # If this job has failed previously, give up. | 618 # If this job has failed previously, give up. |
| 616 print "Failed %s. Your build has failed." % target | 619 print "Failed %s. Your build has failed." % target |
| 617 else: | 620 else: |
| 618 # Queue up this build to try again after a long while. | 621 # Queue up this build to try again after a long while. |
| 619 self._retry_queue.append(target) | 622 self._retry_queue.append(target) |
| 620 self._failed[target] = output | 623 self._failed[target] = output |
| 621 print "Failed %s, retrying later." % target | 624 print "Failed %s, retrying later." % target |
| 622 else: | 625 else: |
| 623 if target in self._failed and self._retry_queue: | 626 if target in self._failed and self._retry_queue: |
| 624 # If we have successfully retried a failed package, and there | 627 # If we have successfully retried a failed package, and there |
| 625 # are more failed packages, try the next one. We will only have | 628 # are more failed packages, try the next one. We will only have |
| 626 # one retrying package actively running at a time. | 629 # one retrying package actively running at a time. |
| 627 self._Retry() | 630 self._Retry() |
| 628 | 631 |
| 629 print "Completed %s" % target | 632 print "Completed %s" % target |
| 630 # Mark as completed and unblock waiting ebuilds. | 633 # Mark as completed and unblock waiting ebuilds. |
| 631 self._Finish(target) | 634 self._Finish(target) |
| 632 | 635 |
| 633 # Print an update. | 636 # Print an update. |
| 634 self._Status() | 637 self._Status() |
| 635 | 638 |
| 636 | 639 |
| 637 # Main control code. | 640 # Main control code. |
| 641 PACKAGE, EMERGE_ARGS, BOARD, JOBS = ParseArgs(sys.argv) |
| 642 |
| 643 if not PACKAGE: |
| 644 # No packages. Pass straight through to emerge. |
| 645 # Allows users to just type ./parallel_emerge --depclean |
| 646 sys.exit(os.system(EmergeCommand())) |
| 647 |
| 638 print "Starting fast-emerge." | 648 print "Starting fast-emerge." |
| 639 PACKAGE, EMERGE_ARGS, BOARD = ParseArgs(sys.argv) | |
| 640 print " Building package %s on %s (%s)" % (PACKAGE, EMERGE_ARGS, BOARD) | 649 print " Building package %s on %s (%s)" % (PACKAGE, EMERGE_ARGS, BOARD) |
| 641 | |
| 642 print "Running emerge to generate deps" | 650 print "Running emerge to generate deps" |
| 643 deps_output = GetDepsFromPortage(PACKAGE) | 651 deps_output = GetDepsFromPortage(PACKAGE) |
| 644 print "Processing emerge output" | 652 print "Processing emerge output" |
| 645 dependency_tree, dependency_info = DepsToTree(deps_output) | 653 dependency_tree, dependency_info = DepsToTree(deps_output) |
| 646 if VERBOSE: | 654 if VERBOSE: |
| 647 print "Print tree" | 655 print "Print tree" |
| 648 PrintTree(dependency_tree) | 656 PrintTree(dependency_tree) |
| 649 | 657 |
| 650 print "Generate dependency graph." | 658 print "Generate dependency graph." |
| 651 dependency_graph = GenDependencyGraph(dependency_tree, dependency_info) | 659 dependency_graph = GenDependencyGraph(dependency_tree, dependency_info) |
| 652 | 660 |
| 653 if VERBOSE: | 661 if VERBOSE: |
| 654 PrintDepsMap(dependency_graph) | 662 PrintDepsMap(dependency_graph) |
| 655 | 663 |
| 656 # Run the queued emerges. | 664 # Run the queued emerges. |
| 657 scheduler = EmergeQueue(dependency_graph) | 665 scheduler = EmergeQueue(dependency_graph) |
| 658 scheduler.Run() | 666 scheduler.Run() |
| 659 | 667 |
| 660 print "Done" | 668 print "Done" |
| 661 | 669 |
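A few self-contained sketches follow to make the trickier parts of this change easier to review. None of this code is part of the CL; the argv values, package names, and helper names are invented for illustration. First, how the reworked flag handling in ParseArgs is expected to split a command line (a minimal standalone rendering of the same rules):

  # Hypothetical invocation: --board= and --jobs= are consumed here,
  # other dashed args (plus the bare "y"/"n" that two-argument flags
  # like "--with-bdeps y" leave behind) pass through to emerge, and
  # anything else is treated as a package name.
  argv = ["./parallel_emerge", "--board=x86-generic", "--jobs=4",
          "--with-bdeps", "y", "-uDN", "chromeos-base/chromeos"]

  board, jobs, passthru, packages = None, 0, [], []
  for arg in argv[1:]:
    if arg.startswith("--board="):
      board = arg.replace("--board=", "")
    elif arg.startswith("--jobs="):
      jobs = int(arg.replace("--jobs=", ""))
    elif arg.startswith("-") or arg in ("y", "n"):
      passthru.append(arg)
    else:
      packages.append(arg)

  print board      # x86-generic
  print jobs       # 4
  print passthru   # ['--with-bdeps', 'y', '-uDN']
  print packages   # ['chromeos-base/chromeos']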
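The big dependency regex in DepsToTree is easiest to check against one of the sample lines quoted in its docstring. The pattern below is copied from the diff; only the test line and the print are added:

  import re

  re_deps = re.compile(r"(?P<indent>\W*)\(\'(?P<pkgtype>\w+)\', "
                       r"\'(?P<destination>[\w/\.-]+)\',"
                       r" \'(?P<pkgdir>[\w\+-]+)/(?P<pkgname>[\w\+-]+)-"
                       r"(?P<version>\d+[\w\.-]*)\', \'(?P<action>\w+)\'\) "
                       r"(?P<deptype>(depends on|\(.*\)))")

  line = "('ebuild', '/', 'dev-lang/swig-1.3.36', 'merge') depends on"
  m = re_deps.match(line)
  # Extracts: pkgtype='ebuild', destination='/', pkgdir='dev-lang',
  # pkgname='swig', version='1.3.36', action='merge',
  # deptype='depends on'.
  print m.groupdict()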
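ReverseTree (inside GenDependencyGraph) turns the parent -> child dependency tree into a digraph where every package records what it "needs" and what it "provides" (i.e. unblocks) once merged. A toy version with invented packages, stripped of the nomerge/deptype handling:

  deps_tree = {
      "app-misc/app-1.0": {"deps": {
          "dev-libs/lib-2.0": {"deps": {
              "sys-devel/toolchain-3.0": {"deps": {}},
          }},
      }},
  }

  deps_map = {}

  def ReverseTree(packages):
    for pkg in packages:
      this_pkg = deps_map.setdefault(pkg, {"needs": set(), "provides": set()})
      ReverseTree(packages[pkg]["deps"])
      for dep in packages[pkg]["deps"]:
        deps_map[dep]["provides"].add(pkg)   # finishing dep unblocks pkg
        this_pkg["needs"].add(dep)           # pkg must wait for dep

  ReverseTree(deps_tree)
  print deps_map["sys-devel/toolchain-3.0"]  # needs nothing, provides lib
  print deps_map["app-misc/app-1.0"]         # needs lib, provides nothing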
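RemoveInstalledPackages then splices already-installed nodes out of that graph while preserving ordering: if B is already merged and A -> B -> C, A must end up needing C directly. The update rule from the diff, applied by hand to a three-node example (A, B, C are placeholders):

  deps_map = {
      "A": {"needs": set(["B"]), "provides": set()},
      "B": {"needs": set(["C"]), "provides": set(["A"])},
      "C": {"needs": set(), "provides": set(["B"])},
  }

  pkg = "B"                              # pretend B is already installed
  needs = deps_map[pkg]["needs"]
  provides = deps_map[pkg]["provides"]
  for dep in needs:                      # C inherits B's dependents
    deps_map[dep]["provides"].update(provides)
    deps_map[dep]["provides"].discard(pkg)
    deps_map[dep]["provides"].discard(dep)
  for target in provides:                # A now waits on B's deps
    deps_map[target]["needs"].update(needs)
    deps_map[target]["needs"].discard(pkg)
    deps_map[target]["needs"].discard(target)
  del deps_map[pkg]

  print deps_map["A"]["needs"]           # set(['C'])
  print deps_map["C"]["provides"]        # set(['A'])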
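SanitizeTree/SanitizeDep guards against cycles (up to length 32) that would otherwise leave the scheduler with no "ready" package. A compact sketch of the detection idea on a two-package cycle; this is a simplified rewrite, not the exact routine in the diff, and the package names are invented:

  deps_map = {
      "x11-libs/foo-1.0": {"needs": set(["x11-libs/bar-1.0"])},
      "x11-libs/bar-1.0": {"needs": set(["x11-libs/foo-1.0"])},
  }

  def FindCycle(basedep, currdep, stack, limit):
    """Return a path back to basedep if currdep transitively needs it."""
    if limit == 0:
      return None
    for dep in deps_map[currdep]["needs"]:
      if dep == basedep:
        return stack + [dep]
      if dep not in stack:
        found = FindCycle(basedep, dep, stack + [dep], limit - 1)
        if found:
          return found
    return None

  start = "x11-libs/foo-1.0"
  print FindCycle(start, start, [start], 32)
  # ['x11-libs/foo-1.0', 'x11-libs/bar-1.0', 'x11-libs/foo-1.0']
  # The real code then removes one "needs" edge so the graph is acyclic.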
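Finally, the Run loop is where --jobs actually takes effect: new work is only dequeued while fewer than JOBS children are alive; otherwise the loop sleeps and polls. The shape of that scheduling loop, with sleep commands standing in for emerge invocations (purely illustrative):

  import subprocess
  import time

  JOBS = 2
  queue = ["sleep 1", "sleep 1", "sleep 1", "sleep 1"]
  running = []

  while queue or running:
    if queue and len(running) < JOBS:
      running.append(subprocess.Popen(queue.pop(0), shell=True))
      continue
    time.sleep(1)                        # poll; there is no wait-on-a-set
    for proc in running[:]:
      if proc.poll() is not None:        # this child has finished
        running.remove(proc)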