| OLD | NEW |
| 1 #!/usr/bin/python2.6 | 1 #!/usr/bin/python2.6 |
| 2 # Copyright (c) 2010 The Chromium OS Authors. All rights reserved. | 2 # Copyright (c) 2010 The Chromium OS Authors. All rights reserved. |
| 3 # Use of this source code is governed by a BSD-style license that can be | 3 # Use of this source code is governed by a BSD-style license that can be |
| 4 # found in the LICENSE file. | 4 # found in the LICENSE file. |
| 5 | 5 |
| 6 """Program to run emerge in parallel, for significant speedup. | 6 """Program to run emerge in parallel, for significant speedup. |
| 7 | 7 |
| 8 Usage: | 8 Usage: |
| 9 ./parallel_emerge --board=BOARD [emerge args] package | 9 ./parallel_emerge --board=BOARD [emerge args] package |
| 10 | 10 |
| (...skipping 120 matching lines...) |
| 131 | 131 |
| 132 def GetDepsFromPortage(package): | 132 def GetDepsFromPortage(package): |
| 133 """Get dependency tree info by running emerge. | 133 """Get dependency tree info by running emerge. |
| 134 | 134 |
| 135 Run 'emerge -p --debug package', and get a text output of all deps. | 135 Run 'emerge -p --debug package', and get a text output of all deps. |
| 136 TODO(): Put dep calculation in a library, as cros_extract_deps | 136 TODO(): Put dep calculation in a library, as cros_extract_deps |
| 137 also uses this code. | 137 also uses this code. |
| 138 Args: | 138 Args: |
| 139 package: string containing the packages to build. | 139 package: string containing the packages to build. |
| 140 Returns: | 140 Returns: |
| 141 text output of emege -p --debug, which can be processed elsewhere. | 141 text output of emerge -p --debug, which can be processed elsewhere. |
| 142 """ | 142 """ |
| 143 print "Calculating deps for package %s" % package | 143 print "Calculating deps for package %s" % package |
| 144 cmdline = EmergeCommand() + " -p --debug " + package | 144 cmdline = EmergeCommand() + " -p --debug --color=n " + package |
| 145 print "+ %s" % cmdline | 145 print "+ %s" % cmdline |
| 146 | 146 |
| 147 # Store output in a temp file as it is too big for a unix pipe. | 147 # Store output in a temp file as it is too big for a unix pipe. |
| 148 stderr_buffer = tempfile.TemporaryFile() | 148 stderr_buffer = tempfile.TemporaryFile() |
| 149 stdout_buffer = tempfile.TemporaryFile() | 149 stdout_buffer = tempfile.TemporaryFile() |
| 150 # Launch the subprocess. | 150 # Launch the subprocess. |
| 151 start = time.time() |
| 151 depsproc = subprocess.Popen(shlex.split(cmdline), stderr=stderr_buffer, | 152 depsproc = subprocess.Popen(shlex.split(cmdline), stderr=stderr_buffer, |
| 152 stdout=stdout_buffer, bufsize=64*1024) | 153 stdout=stdout_buffer, bufsize=64*1024) |
| 153 | |
| 154 # Wait for this to complete. | |
| 155 seconds = 0 | |
| 156 while depsproc.poll() is not None: | |
| 157 seconds += 1 | |
| 158 time.sleep(1) | |
| 159 if seconds % 5 == 0: | |
| 160 print ".", | |
| 161 print " done" | |
| 162 | |
| 163 print "Deps calculated in %d:%02ds" % (seconds / 60, seconds % 60) | |
| 164 | |
| 165 depsproc.wait() | 154 depsproc.wait() |
| 155 seconds = time.time() - start |
| 156 print "Deps calculated in %d:%04.1fs" % (seconds / 60, seconds % 60) |
| 166 stderr_buffer.seek(0) | 157 stderr_buffer.seek(0) |
| 167 stderr_raw = stderr_buffer.read() | 158 stderr_raw = stderr_buffer.read() |
| 168 info_start = stderr_raw.find("digraph") | 159 info_start = stderr_raw.find("digraph") |
| 160 stdout_buffer.seek(0) |
| 161 stdout_raw = stdout_buffer.read() |
| 162 lines = [] |
| 169 if info_start != -1: | 163 if info_start != -1: |
| 170 stdout = stderr_raw[info_start:] | 164 lines = stderr_raw[info_start:].split("\n") |
| 171 else: | 165 lines.extend(stdout_raw.split("\n")) |
| 172 stdout_buffer.seek(0) | |
| 173 stdout_raw = stdout_buffer.read() | |
| 174 stdout = stderr_raw + stdout_raw | |
| 175 if VERBOSE or depsproc.returncode != 0: | 166 if VERBOSE or depsproc.returncode != 0: |
| 176 print stdout | 167 output = stderr_raw + stdout_raw |
| 168 print output |
| 177 if depsproc.returncode != 0: | 169 if depsproc.returncode != 0: |
| 178 print "Failed to generate deps" | 170 print "Failed to generate deps" |
| 179 sys.exit(1) | 171 sys.exit(1) |
| 180 | 172 |
| 181 lines = stdout.split("\n") | |
| 182 return lines | 173 return lines |
| 183 | 174 |
| 184 | 175 |
| 185 def DepsToTree(lines): | 176 def DepsToTree(lines): |
| 186 """Regex the emerge --tree output to generate a nested dict of dependencies. | 177 """Regex the output from 'emerge --debug' to generate a nested dict of deps. |
| 187 | 178 |
| 188 Args: | 179 Args: |
| 189 lines: text dump from 'emerge -p --tree package' | 180 lines: output from 'emerge -p --debug package' |
| 190 Returns: | 181 Returns: |
| 191 dep_tree: nested dict of dependencies, as specified by emerge. | 182 dep_tree: nested dict of dependencies, as specified by emerge. |
| 192 there may be dupes, or circular deps. | 183 there may be dupes, or circular deps. |
| 193 | 184 |
| 194 We need to regex lines as follows: | 185 We need to regex lines as follows: |
| 195 hard-host-depends depends on | 186 hard-host-depends depends on |
| 196 ('ebuild', '/', 'dev-lang/swig-1.3.36', 'merge') depends on | 187 ('ebuild', '/', 'dev-lang/swig-1.3.36', 'merge') depends on |
| 197 ('ebuild', '/', 'dev-lang/perl-5.8.8-r8', 'merge') (buildtime) | 188 ('ebuild', '/', 'dev-lang/perl-5.8.8-r8', 'merge') (buildtime) |
| 198 ('binary', '/.../rootfs/', 'sys-auth/policykit-0.9-r1', 'merge') depends on | 189 ('binary', '/.../rootfs/', 'sys-auth/policykit-0.9-r1', 'merge') depends on |
| 199 ('binary', '/.../rootfs/', 'x11-misc/xbitmaps-1.1.0', 'merge') (no children) | 190 ('binary', '/.../rootfs/', 'x11-misc/xbitmaps-1.1.0', 'merge') (no children) |
| 200 """ | 191 """ |
| 201 | 192 |
| 202 re_deps = re.compile(r"(?P<indent>\W*)\(\'(?P<pkgtype>\w+)\', " | 193 re_deps = re.compile(r"(?P<indent>\W*)\(\'(?P<pkgtype>\w+)\', " |
| 203 r"\'(?P<destination>[\w/\.-]+)\'," | 194 r"\'(?P<destination>[\w/\.-]+)\'," |
| 204 r" \'(?P<pkgdir>[\w\+-]+)/(?P<pkgname>[\w\+-]+)-" | 195 r" \'(?P<pkgdir>[\w\+-]+)/(?P<pkgname>[\w\+-]+)-" |
| 205 r"(?P<version>\d+[\w\.-]*)\', \'(?P<action>\w+)\'\) " | 196 r"(?P<version>\d+[\w\.-]*)\', \'(?P<action>\w+)\'\) " |
| 206 r"(?P<deptype>(depends on|\(.*\)))") | 197 r"(?P<deptype>(depends on|\(.*\)))") |
| 207 re_origdeps = re.compile(r"(?P<pkgname>[\w\+/-]+) depends on") | 198 re_origdeps = re.compile(r"(?P<pkgname>[\w\+/-]+) depends on") |
| 199 re_installed_package = re.compile( |
| 200 r"\[(?P<desc>[^\]]*)\] " |
| 201 r"(?P<pkgdir>[\w\+-]+)/" |
| 202 r"(?P<pkgname>[\w\+-]+)-" |
| 203 r"(?P<version>\d+[\w\.-]*)( \[" |
| 204 r"(?P<oldversion>\d+[\w\.-]*)\])?" |
| 205 ) |
| 208 re_failed = re.compile(r".*depends on.*") | 206 re_failed = re.compile(r".*depends on.*") |
| 209 | |
| 210 deps_tree = {} | 207 deps_tree = {} |
| 211 deps_stack = [] | 208 deps_stack = [] |
| 209 deps_info = {} |
| 212 for line in lines: | 210 for line in lines: |
| 213 m = re_deps.match(line) | 211 m = re_deps.match(line) |
| 214 m_orig = re_origdeps.match(line) | 212 m_orig = re_origdeps.match(line) |
| 213 m_installed = re_installed_package.match(line) |
| 215 if m: | 214 if m: |
| 216 pkgname = m.group("pkgname") | 215 pkgname = m.group("pkgname") |
| 217 pkgdir = m.group("pkgdir") | 216 pkgdir = m.group("pkgdir") |
| 218 pkgtype = m.group("pkgtype") | 217 pkgtype = m.group("pkgtype") |
| 219 indent = m.group("indent") | 218 indent = m.group("indent") |
| 220 doins = m.group("action") | 219 doins = m.group("action") |
| 221 deptype = m.group("deptype") | 220 deptype = m.group("deptype") |
| 222 depth = 1 | 221 depth = 1 |
| 223 if not indent: | 222 if not indent: |
| 224 depth = 0 | 223 depth = 0 |
| (...skipping 51 matching lines...) |
| 276 updatedep.setdefault(pkgname, {}) | 275 updatedep.setdefault(pkgname, {}) |
| 277 updatedep[pkgname].setdefault("deps", {}) | 276 updatedep[pkgname].setdefault("deps", {}) |
| 278 # Add the type of dep. | 277 # Add the type of dep. |
| 279 updatedep[pkgname].setdefault("action", "world") | 278 updatedep[pkgname].setdefault("action", "world") |
| 280 updatedep[pkgname].setdefault("deptype", "normal") | 279 updatedep[pkgname].setdefault("deptype", "normal") |
| 281 | 280 |
| 282 # Drop any obsolete stack entries. | 281 # Drop any obsolete stack entries. |
| 283 deps_stack = deps_stack[0:depth] | 282 deps_stack = deps_stack[0:depth] |
| 284 # Add ourselves to the end of the stack. | 283 # Add ourselves to the end of the stack. |
| 285 deps_stack.append(pkgname) | 284 deps_stack.append(pkgname) |
| 285 elif m_installed: |
| 286 pkgname = m_installed.group("pkgname") |
| 287 pkgdir = m_installed.group("pkgdir") |
| 288 version = m_installed.group("version") |
| 289 oldversion = m_installed.group("oldversion") |
| 290 desc = m_installed.group("desc") |
| 291 uninstall = False |
| 292 if oldversion and (desc.find("U") != -1 or desc.find("D") != -1): |
| 293 uninstall = True |
| 294 fullpkg = "%s/%s-%s" % (pkgdir, pkgname, version) |
| 295 deps_info[fullpkg] = {"idx": len(deps_info), |
| 296 "pkgdir": pkgdir, |
| 297 "pkgname": pkgname, |
| 298 "oldversion": oldversion, |
| 299 "uninstall": uninstall} |
| 286 else: | 300 else: |
| 287 # Is this a package that failed to match uor huge regex? | 301 # Is this a package that failed to match our huge regex? |
| 288 m = re_failed.match(line) | 302 m = re_failed.match(line) |
| 289 if m: | 303 if m: |
| 290 print "FAIL: Couldn't understand line:" | 304 print "FAIL: Couldn't understand line:" |
| 291 print line | 305 print line |
| 292 sys.exit(1) | 306 sys.exit(1) |
| 293 | 307 |
| 294 return deps_tree | 308 return deps_tree, deps_info |
| 295 | 309 |
| 296 | 310 |
| 297 def PrintTree(deps, depth=""): | 311 def PrintTree(deps, depth=""): |
| 298 """Print the deps we have seen in the emerge output. | 312 """Print the deps we have seen in the emerge output. |
| 299 | 313 |
| 300 Args: | 314 Args: |
| 301 deps: dependency tree structure. | 315 deps: dependency tree structure. |
| 302 depth: allows printing the tree recursively, with indentation. | 316 depth: allows printing the tree recursively, with indentation. |
| 303 """ | 317 """ |
| 304 for entry in deps: | 318 for entry in deps: |
| 305 action = deps[entry]["action"] | 319 action = deps[entry]["action"] |
| 306 print "%s %s (%s)" % (depth, entry, action) | 320 print "%s %s (%s)" % (depth, entry, action) |
| 307 PrintTree(deps[entry]["deps"], depth=depth + " ") | 321 PrintTree(deps[entry]["deps"], depth=depth + " ") |
| 308 | 322 |
| 309 | 323 |
| 310 def GenDependencyGraph(deps_tree): | 324 def GenDependencyGraph(deps_tree, deps_info): |
| 311 """Generate a doubly linked dependency graph. | 325 """Generate a doubly linked dependency graph. |
| 312 | 326 |
| 313 Args: | 327 Args: |
| 314 deps_tree: dependency tree structure. | 328 deps_tree: dependency tree structure. |
| 329 deps_info: more info on the dependencies. |
| 315 Returns: | 330 Returns: |
| 316 Deps graph in the form of a dict of packages, with each package | 331 Deps graph in the form of a dict of packages, with each package |
| 317 specifying a "needs" list and "provides" list. | 332 specifying a "needs" list and "provides" list. |
| 318 """ | 333 """ |
| 319 deps_map = {} | 334 deps_map = {} |
| 320 | 335 |
| 321 def ReverseTree(packages): | 336 def ReverseTree(packages): |
| 322 """Convert tree to digraph. | 337 """Convert tree to digraph. |
| 323 | 338 |
| 324 Take the tree of package -> requirements and reverse it to a digraph of | 339 Take the tree of package -> requirements and reverse it to a digraph of |
| 325 buildable packages -> packages they unblock | 340 buildable packages -> packages they unblock |
| 326 Args: | 341 Args: |
| 327 packages: tree(s) of dependencies | 342 packages: tree(s) of dependencies |
| 328 Returns: | 343 Returns: |
| 329 unsanitized digraph | 344 unsanitized digraph |
| 330 """ | 345 """ |
| 331 for pkg in packages: | 346 for pkg in packages: |
| 332 action = packages[pkg]["action"] | 347 action = packages[pkg]["action"] |
| 333 this_pkg = deps_map.setdefault( | 348 this_pkg = deps_map.setdefault( |
| 334 pkg, {"needs": {}, "provides": set(), "action": "nomerge"}) | 349 pkg, {"needs": set(), "provides": set(), "action": "nomerge"}) |
| 335 if action != "nomerge": | 350 if action != "nomerge": |
| 336 this_pkg["action"] = action | 351 this_pkg["action"] = action |
| 352 this_pkg["deps_info"] = deps_info.get(pkg) |
| 337 ReverseTree(packages[pkg]["deps"]) | 353 ReverseTree(packages[pkg]["deps"]) |
| 338 for dep, dep_item in packages[pkg]["deps"].items(): | 354 for dep, dep_item in packages[pkg]["deps"].items(): |
| 339 dep_pkg = deps_map[dep] | 355 dep_pkg = deps_map[dep] |
| 340 dep_type = dep_item["deptype"] | 356 dep_type = dep_item["deptype"] |
| 341 if dep_type == "(runtime_post)": | 357 if dep_type != "(runtime_post)": |
| 342 dep_pkg["needs"][pkg] = dep_type | |
| 343 this_pkg["provides"].add(dep) | |
| 344 else: | |
| 345 dep_pkg["provides"].add(pkg) | 358 dep_pkg["provides"].add(pkg) |
| 346 this_pkg["needs"][dep] = dep_type | 359 this_pkg["needs"].add(dep) |
| 360 |
| 361 def RemoveInstalledPackages(): |
| 362 """Remove installed packages, propagating dependencies""" |
| 363 |
| 364 rm_pkgs = set(deps_map.keys()) - set(deps_info.keys()) |
| 365 for pkg in rm_pkgs: |
| 366 this_pkg = deps_map[pkg] |
| 367 needs = this_pkg["needs"] |
| 368 provides = this_pkg["provides"] |
| 369 for dep in needs: |
| 370 dep_provides = deps_map[dep]["provides"] |
| 371 dep_provides.update(provides) |
| 372 dep_provides.discard(pkg) |
| 373 dep_provides.discard(dep) |
| 374 for target in provides: |
| 375 target_needs = deps_map[target]["needs"] |
| 376 target_needs.update(needs) |
| 377 target_needs.discard(pkg) |
| 378 target_needs.discard(target) |
| 379 del deps_map[pkg] |
| 380 |
| 347 | 381 |
| 348 def SanitizeDep(basedep, currdep, oldstack, limit): | 382 def SanitizeDep(basedep, currdep, oldstack, limit): |
| 349 """Remove any circular dependencies between basedep, currdep, then recurse. | 383 """Search for circular deps between basedep and currdep, then recurse. |
| 350 | 384 |
| 351 Args: | 385 Args: |
| 352 basedep: original dependency, top of stack. | 386 basedep: original dependency, top of stack. |
| 353 currdep: bottom of our current recursion, bottom of stack. | 387 currdep: bottom of our current recursion, bottom of stack. |
| 354 oldstack: current dependency chain. | 388 oldstack: current dependency chain. |
| 355 limit: how many more levels of recursion to go through, max. | 389 limit: how many more levels of recursion to go through, max. |
| 356 TODO(): Break PDEPEND preferentially, then RDEPEND. Also extract emerge | 390 TODO(): Break RDEPEND preferentially. |
| 357 linear ordering and break cycles on default emerge linear order. | 391 Returns: |
| 392 True iff circular dependencies are found. |
| 358 """ | 393 """ |
| 359 if limit == 0: | 394 if limit == 0: |
| 360 return | 395 return |
| 361 for dep in deps_map[currdep]["needs"]: | 396 for dep in deps_map[currdep]["needs"]: |
| 362 stack = oldstack + [dep] | 397 stack = oldstack + [dep] |
| 363 if basedep in deps_map[dep]["needs"]: | 398 if basedep in deps_map[dep]["needs"] or dep == basedep: |
| 399 if dep != basedep: |
| 400 stack += [basedep] |
| 364 print "Remove cyclic dependency from:" | 401 print "Remove cyclic dependency from:" |
| 365 for i in xrange(0, len(stack) - 1): | 402 for i in xrange(0, len(stack) - 1): |
| 366 print " %s (%s)-> %s " % ( | 403 print " %s -> %s " % (stack[i], stack[i+1]) |
| 367 stack[i], deps_map[stack[i]]["needs"][stack[i+1]], stack[i+1]) | 404 return True |
| 368 del deps_map[dep]["needs"][basedep] | 405 if dep not in oldstack and SanitizeDep(basedep, dep, stack, limit - 1): |
| 369 deps_map[basedep]["provides"].remove(dep) | 406 return True |
| 370 SanitizeDep(basedep, dep, stack, limit - 1) | 407 return |
| 371 | 408 |
| 372 def SanitizeTree(): | 409 def SanitizeTree(): |
| 373 """Remove circular dependencies up to cycle length 8.""" | 410 """Remove circular dependencies up to cycle length 32.""" |
| 374 for dep in deps_map: | 411 start = time.time() |
| 375 SanitizeDep(dep, dep, [dep], 8) | 412 for basedep in deps_map: |
| 413 for dep in deps_map[basedep]["needs"].copy(): |
| 414 if deps_info[basedep]["idx"] <= deps_info[dep]["idx"]: |
| 415 if SanitizeDep(basedep, dep, [basedep, dep], 31): |
| 416 print "Breaking", basedep, " -> ", dep |
| 417 deps_map[basedep]["needs"].remove(dep) |
| 418 deps_map[dep]["provides"].remove(basedep) |
| 419 seconds = time.time() - start |
| 420 print "Tree sanitized in %d:%04.1fs" % (seconds / 60, seconds % 60) |
| 376 | 421 |
| 377 def AddSecretDeps(): | 422 def AddSecretDeps(): |
| 378 """Find these tagged packages and add extra dependencies. | 423 """Find these tagged packages and add extra dependencies. |
| 379 | 424 |
| 380 For debugging dependency problems. | 425 For debugging dependency problems. |
| 381 """ | 426 """ |
| 382 for bad in secret_deps: | 427 for bad in secret_deps: |
| 383 needed = secret_deps[bad] | 428 needed = secret_deps[bad] |
| 384 bad_pkg = None | 429 bad_pkg = None |
| 385 needed_pkg = None | 430 needed_pkg = None |
| 386 for dep in deps_map: | 431 for dep in deps_map: |
| 387 if dep.find(bad) != -1: | 432 if dep.find(bad) != -1: |
| 388 bad_pkg = dep | 433 bad_pkg = dep |
| 389 if dep.find(needed) != -1: | 434 if dep.find(needed) != -1: |
| 390 needed_pkg = dep | 435 needed_pkg = dep |
| 391 if bad_pkg and needed_pkg: | 436 if bad_pkg and needed_pkg: |
| 392 deps_map[needed_pkg]["provides"].add(bad_pkg) | 437 deps_map[needed_pkg]["provides"].add(bad_pkg) |
| 393 deps_map[bad_pkg]["needs"][needed_pkg] = "(manually forced)" | 438 deps_map[bad_pkg]["needs"].add(needed_pkg) |
| 394 | 439 |
| 395 ReverseTree(deps_tree) | 440 ReverseTree(deps_tree) |
| 396 AddSecretDeps() | 441 AddSecretDeps() |
| 442 RemoveInstalledPackages() |
| 397 SanitizeTree() | 443 SanitizeTree() |
| 398 return deps_map | 444 return deps_map |
| 399 | 445 |
| 400 | 446 |
| 401 def PrintDepsMap(deps_map): | 447 def PrintDepsMap(deps_map): |
| 402 """Print dependency graph, for each package list it's prerequisites.""" | 448 """Print dependency graph, for each package list it's prerequisites.""" |
| 403 for i in deps_map: | 449 for i in deps_map: |
| 404 print "%s: (%s) needs" % (i, deps_map[i]["action"]) | 450 print "%s: (%s) needs" % (i, deps_map[i]["action"]) |
| 405 for j, dep_type in deps_map[i]["needs"].items(): | 451 for j in deps_map[i]["needs"]: |
| 406 print " %s ( %s )" % (j, dep_type) | 452 print " %s" % (j) |
| 407 | 453 |
| 408 | 454 |
| 409 class EmergeQueue(object): | 455 class EmergeQueue(object): |
| 410 """Class to schedule emerge jobs according to a dependency graph.""" | 456 """Class to schedule emerge jobs according to a dependency graph.""" |
| 411 | 457 |
| 412 def __init__(self, deps_map): | 458 def __init__(self, deps_map): |
| 413 # Store the dependency graph. | 459 # Store the dependency graph. |
| 414 self._deps_map = deps_map | 460 self._deps_map = deps_map |
| 415 # Initialize the runnable queue to empty. | 461 # Initialize the runnable queue to empty. |
| 416 self._jobs = [] | 462 self._jobs = [] |
| 417 # List of total package installs represented in deps_map. | 463 # List of total package installs represented in deps_map. |
| 418 install_jobs = [x for x in deps_map if deps_map[x]["action"] == "merge"] | 464 install_jobs = [x for x in deps_map if deps_map[x]["action"] == "merge"] |
| 419 self._total_jobs = len(install_jobs) | 465 self._total_jobs = len(install_jobs) |
| 420 | 466 |
| 421 # Initialize the ready queue; these are jobs with no unmet dependencies. | 467 # Initialize the ready queue; these are jobs with no unmet dependencies. |
| 422 self._emerge_queue = [x for x in deps_map if not deps_map[x]["needs"]] | 468 self._emerge_queue = [x for x in deps_map if not deps_map[x]["needs"]] |
| 423 # Initialize the failed queue to empty. | 469 # Initialize the failed queue to empty. |
| 424 self._retry_queue = [] | 470 self._retry_queue = [] |
| 425 self._failed = {} | 471 self._failed = {} |
| 426 | 472 |
| 427 def _Status(self): | 473 def _Status(self): |
| 428 """Print status.""" | 474 """Print status.""" |
| 429 print "Pending %s, Ready %s, Running %s, Failed %s, Total %s" % ( | 475 print "Pending %s, Ready %s, Running %s, Retrying %s, Total %s" % ( |
| 430 len(self._deps_map), len(self._emerge_queue), | 476 len(self._deps_map), len(self._emerge_queue), |
| 431 len(self._jobs), len(self._failed), self._total_jobs) | 477 len(self._jobs), len(self._retry_queue), self._total_jobs) |
| 432 | 478 |
| 433 def _LaunchOneEmerge(self, target): | 479 def _LaunchOneEmerge(self, target): |
| 434 """Run emerge --nodeps to do a single package install. | 480 """Run emerge --nodeps to do a single package install. |
| 435 | 481 |
| 436 If this is a pseudopackage, that means we're done, and can select it in the | 482 If this is a pseudopackage, that means we're done, and can select it in the |
| 437 world file. | 483 world file. |
| 438 Args: | 484 Args: |
| 439 target: the full package name of the package to install. | 485 target: the full package name of the package to install. |
| 440 eg. "sys-apps/portage-2.17" | 486 eg. "sys-apps/portage-2.17" |
| 441 Returns: | 487 Returns: |
| 442 triplet containing (target name, subprocess object, output buffer object) | 488 triplet containing (target name, subprocess object, output buffer object) |
| 443 """ | 489 """ |
| 444 if target.startswith("original-"): | 490 if target.startswith("original-"): |
| 445 # "original-" signifies one of the packages we originally requested. | 491 # "original-" signifies one of the packages we originally requested. |
| 446 # Since we have explicitly installed the versioned package as a dep of | 492 # Since we have explicitly installed the versioned package as a dep of |
| 447 # this, we only need to tag in "world" that we are done with this | 493 # this, we only need to tag in "world" that we are done with this |
| 448 # install request. "--select -n" indicates an addition to "world" | 494 # install request. "--select -n" indicates an addition to "world" |
| 449 # without an actual install. | 495 # without an actual install. |
| 450 newtarget = target.replace("original-", "") | 496 newtarget = target.replace("original-", "") |
| 451 cmdline = EmergeCommand() + " --nodeps --select --noreplace " + newtarget | 497 cmdline = EmergeCommand() + " --nodeps --select --noreplace " + newtarget |
| 452 else: | 498 else: |
| 453 # This package is a dependency of something we specifically | 499 # This package is a dependency of something we specifically |
| 454 # requested. Therefore we should install it but not allow it | 500 # requested. Therefore we should install it but not allow it |
| 455 # in the "world" file, which represents explicit installs. | 501 # in the "world" file, which represents explicit installs. |
| 456 # "--oneshot" here will prevent it from being tagged in world. | 502 # "--oneshot" here will prevent it from being tagged in world. |
| 457 cmdline = EmergeCommand() + " --nodeps --oneshot =" + target | 503 cmdline = EmergeCommand() + " --nodeps --oneshot =" + target |
| 458 if VERBOSE: | 504 deps_info = self._deps_map[target]["deps_info"] |
| 459 print "running %s" % cmdline | 505 if deps_info["uninstall"]: |
| 506 package = "%(pkgdir)s/%(pkgname)s-%(oldversion)s" % deps_info |
| 507 cmdline += " && %s -1C =%s" % (EmergeCommand(), package) |
| 508 |
| 509 print "+ %s" % cmdline |
| 460 | 510 |
| 461 # Store output in a temp file as it is too big for a unix pipe. | 511 # Store output in a temp file as it is too big for a unix pipe. |
| 462 stdout_buffer = tempfile.TemporaryFile() | 512 stdout_buffer = tempfile.TemporaryFile() |
| 463 # Modify the environment to disable locking. | 513 # Modify the environment to disable locking. |
| 464 portage_env = os.environ.copy() | 514 portage_env = os.environ.copy() |
| 465 portage_env["PORTAGE_LOCKS"] = "false" | 515 portage_env["PORTAGE_LOCKS"] = "false" |
| 516 portage_env["UNMERGE_DELAY"] = "0" |
| 466 # Autoclean rummages around in the portage database and uninstalls | 517 # Autoclean rummages around in the portage database and uninstalls |
| 467 # old packages. Definitely not necessary for build_image. However | 518 # old packages. Definitely not necessary for build_image. However |
| 468 # it may be necessary for incremental build_packages. It may also | 519 # it may be necessary for incremental build_packages. It may also |
| 469 # not be parallel safe. | 520 # not be parallel safe. |
| 470 if not AUTOCLEAN: | 521 if not AUTOCLEAN: |
| 471 portage_env["AUTOCLEAN"] = "no" | 522 portage_env["AUTOCLEAN"] = "no" |
| 472 # Launch the subprocess. | 523 # Launch the subprocess. |
| 473 emerge_proc = subprocess.Popen( | 524 emerge_proc = subprocess.Popen( |
| 474 shlex.split(cmdline), stdout=stdout_buffer, | 525 cmdline, shell=True, stdout=stdout_buffer, |
| 475 stderr=subprocess.STDOUT, bufsize=64*1024, env=portage_env) | 526 stderr=subprocess.STDOUT, bufsize=64*1024, env=portage_env) |
| 476 | 527 |
| 477 return (target, emerge_proc, stdout_buffer) | 528 return (target, emerge_proc, stdout_buffer) |
| 478 | 529 |
| 479 def _Finish(self, target): | 530 def _Finish(self, target): |
| 480 """Mark a target as completed and unblock dependecies.""" | 531 """Mark a target as completed and unblock dependecies.""" |
| 481 for dep in self._deps_map[target]["provides"]: | 532 for dep in self._deps_map[target]["provides"]: |
| 482 del self._deps_map[dep]["needs"][target] | 533 self._deps_map[dep]["needs"].remove(target) |
| 483 if not self._deps_map[dep]["needs"]: | 534 if not self._deps_map[dep]["needs"]: |
| 484 if VERBOSE: | 535 if VERBOSE: |
| 485 print "Unblocking %s" % dep | 536 print "Unblocking %s" % dep |
| 486 self._emerge_queue.append(dep) | 537 self._emerge_queue.append(dep) |
| 487 self._deps_map.pop(target) | 538 self._deps_map.pop(target) |
| 488 | 539 |
| 489 def _Retry(self): | 540 def _Retry(self): |
| 490 if self._retry_queue: | 541 if self._retry_queue: |
| 491 target = self._retry_queue.pop(0) | 542 target = self._retry_queue.pop(0) |
| 492 self._emerge_queue.append(target) | 543 self._emerge_queue.append(target) |
| (...skipping 91 matching lines...) |
| 584 | 635 |
| 585 | 636 |
| 586 # Main control code. | 637 # Main control code. |
| 587 print "Starting fast-emerge." | 638 print "Starting fast-emerge." |
| 588 PACKAGE, EMERGE_ARGS, BOARD = ParseArgs(sys.argv) | 639 PACKAGE, EMERGE_ARGS, BOARD = ParseArgs(sys.argv) |
| 589 print " Building package %s on %s (%s)" % (PACKAGE, EMERGE_ARGS, BOARD) | 640 print " Building package %s on %s (%s)" % (PACKAGE, EMERGE_ARGS, BOARD) |
| 590 | 641 |
| 591 print "Running emerge to generate deps" | 642 print "Running emerge to generate deps" |
| 592 deps_output = GetDepsFromPortage(PACKAGE) | 643 deps_output = GetDepsFromPortage(PACKAGE) |
| 593 print "Processing emerge output" | 644 print "Processing emerge output" |
| 594 dependency_tree = DepsToTree(deps_output) | 645 dependency_tree, dependency_info = DepsToTree(deps_output) |
| 595 if VERBOSE: | 646 if VERBOSE: |
| 596 print "Print tree" | 647 print "Print tree" |
| 597 PrintTree(dependency_tree) | 648 PrintTree(dependency_tree) |
| 598 | 649 |
| 599 print "Generate dependency graph." | 650 print "Generate dependency graph." |
| 600 dependency_graph = GenDependencyGraph(dependency_tree) | 651 dependency_graph = GenDependencyGraph(dependency_tree, dependency_info) |
| 601 | 652 |
| 602 if VERBOSE: | 653 if VERBOSE: |
| 603 PrintDepsMap(dependency_graph) | 654 PrintDepsMap(dependency_graph) |
| 604 | 655 |
| 605 # Run the queued emerges. | 656 # Run the queued emerges. |
| 606 scheduler = EmergeQueue(dependency_graph) | 657 scheduler = EmergeQueue(dependency_graph) |
| 607 scheduler.Run() | 658 scheduler.Run() |
| 608 | 659 |
| 609 print "Done" | 660 print "Done" |
| 610 | 661 |
| OLD | NEW |
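
A minimal sketch (not the reviewed code) of what the new re_installed_package pattern captures from a typical 'emerge -p' pretend line; the sample line and its values are assumptions for illustration only.

import re

re_installed_package = re.compile(
    r"\[(?P<desc>[^\]]*)\] "
    r"(?P<pkgdir>[\w\+-]+)/"
    r"(?P<pkgname>[\w\+-]+)-"
    r"(?P<version>\d+[\w\.-]*)( \["
    r"(?P<oldversion>\d+[\w\.-]*)\])?")

# Hypothetical pretend line for an upgrade; the old version appears in brackets.
sample = "[ebuild     U ] sys-apps/portage-2.1.6.13 [2.1.6.12]"
m = re_installed_package.match(sample)
if m:
    # A "U" or "D" in the status block plus an old version is what DepsToTree
    # uses to flag the old package for unmerging after the new install.
    print m.groupdict()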
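
And a minimal, self-contained sketch of the needs/provides bookkeeping that GenDependencyGraph builds and EmergeQueue consumes, using hypothetical package names: a package becomes runnable once its "needs" set is empty, and finishing it clears it out of the "needs" set of every package it "provides".

# Hypothetical three-package chain: perl before swig, swig before the example app.
deps_map = {
    "dev-lang/perl-5.8.8-r8": {"needs": set(),
                               "provides": set(["dev-lang/swig-1.3.36"])},
    "dev-lang/swig-1.3.36": {"needs": set(["dev-lang/perl-5.8.8-r8"]),
                             "provides": set(["app-misc/example-1.0"])},
    "app-misc/example-1.0": {"needs": set(["dev-lang/swig-1.3.36"]),
                             "provides": set()},
}

ready = [pkg for pkg in deps_map if not deps_map[pkg]["needs"]]
while ready:
    target = ready.pop(0)       # the real script launches 'emerge --nodeps' here
    print "emerging", target
    for dep in deps_map[target]["provides"]:
        deps_map[dep]["needs"].remove(target)
        if not deps_map[dep]["needs"]:
            ready.append(dep)   # all prerequisites finished; now runnable
    del deps_map[target]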