| Index: parallel_emerge
|
| diff --git a/parallel_emerge b/parallel_emerge
|
| index 72fcc27c6d7d5a1d11ea6f2cb83c33c4e7315acd..4f54927e9192edcb7bd1496c0edd234370e0c487 100755
|
| --- a/parallel_emerge
|
| +++ b/parallel_emerge
|
| @@ -138,58 +138,49 @@ def GetDepsFromPortage(package):
|
| Args:
|
| package: string containing the packages to build.
|
| Returns:
|
| - text output of emege -p --debug, which can be processed elsewhere.
|
| + text output of emerge -p --debug, which can be processed elsewhere.
|
| """
|
| print "Calculating deps for package %s" % package
|
| - cmdline = EmergeCommand() + " -p --debug " + package
|
| + cmdline = EmergeCommand() + " -p --debug --color=n " + package
|
| print "+ %s" % cmdline
|
|
|
| # Store output in a temp file as it is too big for a unix pipe.
|
| stderr_buffer = tempfile.TemporaryFile()
|
| stdout_buffer = tempfile.TemporaryFile()
|
| # Launch the subprocess.
|
| + start = time.time()
|
| depsproc = subprocess.Popen(shlex.split(cmdline), stderr=stderr_buffer,
|
| stdout=stdout_buffer, bufsize=64*1024)
|
| -
|
| - # Wait for this to complete.
|
| - seconds = 0
|
| - while depsproc.poll() is not None:
|
| - seconds += 1
|
| - time.sleep(1)
|
| - if seconds % 5 == 0:
|
| - print ".",
|
| - print " done"
|
| -
|
| - print "Deps calculated in %d:%02ds" % (seconds / 60, seconds % 60)
|
| -
|
| depsproc.wait()
|
| + seconds = time.time() - start
|
| + print "Deps calculated in %d:%04.1fs" % (seconds / 60, seconds % 60)
|
| stderr_buffer.seek(0)
|
| stderr_raw = stderr_buffer.read()
|
| info_start = stderr_raw.find("digraph")
|
| + stdout_buffer.seek(0)
|
| + stdout_raw = stdout_buffer.read()
|
| + lines = []
|
| if info_start != -1:
|
| - stdout = stderr_raw[info_start:]
|
| - else:
|
| - stdout_buffer.seek(0)
|
| - stdout_raw = stdout_buffer.read()
|
| - stdout = stderr_raw + stdout_raw
|
| + lines = stderr_raw[info_start:].split("\n")
|
| + lines.extend(stdout_raw.split("\n"))
|
| if VERBOSE or depsproc.returncode != 0:
|
| - print stdout
|
| + output = stderr_raw + stdout_raw
|
| + print output
|
| if depsproc.returncode != 0:
|
| print "Failed to generate deps"
|
| sys.exit(1)
|
|
|
| - lines = stdout.split("\n")
|
| return lines
|
|
|
|
|
| def DepsToTree(lines):
|
| - """Regex the emerge --tree output to generate a nested dict of dependencies.
|
| + """Regex the output from 'emerge --debug' to generate a nested dict of deps.
|
|
|
| Args:
|
| - lines: text dump from 'emerge -p --tree package'
|
| + lines: output from 'emerge -p --debug package'
|
| Returns:
|
| dep_tree: nested dict of dependencies, as specified by emerge.
|
| - there may be dupes, or circular deps.
|
| + there may be dupes, or circular deps.
|
|
|
| We need to regex lines as follows:
|
| hard-host-depends depends on
|
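Annotation: the hunk above swaps the old poll-and-sleep loop for a simple wall-clock measurement around depsproc.wait(), and now reads both captured buffers, keeping everything from the "digraph" marker in stderr onward. A rough standalone sketch of that capture-and-time pattern on a Portage host (RunAndCapture and the sample package are hypothetical; only the flags mirror the patch):

    import shlex
    import subprocess
    import tempfile
    import time

    def RunAndCapture(cmdline):
      """Run a command, keeping stdout/stderr in temp files (too big for a pipe)."""
      stderr_buffer = tempfile.TemporaryFile()
      stdout_buffer = tempfile.TemporaryFile()
      start = time.time()
      proc = subprocess.Popen(shlex.split(cmdline), stderr=stderr_buffer,
                              stdout=stdout_buffer, bufsize=64*1024)
      proc.wait()
      seconds = time.time() - start
      print "Ran in %d:%04.1fs" % (seconds / 60, seconds % 60)  # e.g. "1:05.3s"
      stderr_buffer.seek(0)
      stdout_buffer.seek(0)
      return proc.returncode, stdout_buffer.read(), stderr_buffer.read()

    # emerge --debug prints its dependency digraph on stderr; everything from
    # the "digraph" marker onward is what DepsToTree() gets to parse.
    code, out, err = RunAndCapture("emerge -p --debug --color=n sys-apps/portage")
    info_start = err.find("digraph")
    lines = err[info_start:].split("\n") if info_start != -1 else []
    lines.extend(out.split("\n"))
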
| @@ -205,13 +196,21 @@ def DepsToTree(lines):
|
| r"(?P<version>\d+[\w\.-]*)\', \'(?P<action>\w+)\'\) "
|
| r"(?P<deptype>(depends on|\(.*\)))")
|
| re_origdeps = re.compile(r"(?P<pkgname>[\w\+/-]+) depends on")
|
| + re_installed_package = re.compile(
|
| + r"\[(?P<desc>[^\]]*)\] "
|
| + r"(?P<pkgdir>[\w\+-]+)/"
|
| + r"(?P<pkgname>[\w\+-]+)-"
|
| + r"(?P<version>\d+[\w\.-]*)( \["
|
| + r"(?P<oldversion>\d+[\w\.-]*)\])?"
|
| + )
|
| re_failed = re.compile(r".*depends on.*")
|
| -
|
| deps_tree = {}
|
| deps_stack = []
|
| + deps_info = {}
|
| for line in lines:
|
| m = re_deps.match(line)
|
| m_orig = re_origdeps.match(line)
|
| + m_installed = re_installed_package.match(line)
|
| if m:
|
| pkgname = m.group("pkgname")
|
| pkgdir = m.group("pkgdir")
|
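Annotation: the new re_installed_package pattern picks apart emerge's merge-list lines. A quick illustration against a made-up line of that shape (the package and versions are invented; only the regex comes from the patch):

    import re

    re_installed_package = re.compile(
        r"\[(?P<desc>[^\]]*)\] "
        r"(?P<pkgdir>[\w\+-]+)/"
        r"(?P<pkgname>[\w\+-]+)-"
        r"(?P<version>\d+[\w\.-]*)( \["
        r"(?P<oldversion>\d+[\w\.-]*)\])?")

    # Invented merge-list line in the "[flags] category/package-version [oldversion]" shape.
    line = "[ebuild     U  ] sys-apps/portage-2.1.7 [2.1.6]"
    m = re_installed_package.match(line)
    print m.group("desc")        # "ebuild     U  "
    print m.group("pkgdir")      # "sys-apps"
    print m.group("pkgname")     # "portage"
    print m.group("version")     # "2.1.7"
    print m.group("oldversion")  # "2.1.6"
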
| @@ -283,15 +282,30 @@ def DepsToTree(lines):
|
| deps_stack = deps_stack[0:depth]
|
| # Add ourselves to the end of the stack.
|
| deps_stack.append(pkgname)
|
| + elif m_installed:
|
| + pkgname = m_installed.group("pkgname")
|
| + pkgdir = m_installed.group("pkgdir")
|
| + version = m_installed.group("version")
|
| + oldversion = m_installed.group("oldversion")
|
| + desc = m_installed.group("desc")
|
| + uninstall = False
|
| + if oldversion and (desc.find("U") != -1 or desc.find("D") != -1):
|
| + uninstall = True
|
| + fullpkg = "%s/%s-%s" % (pkgdir, pkgname, version)
|
| + deps_info[fullpkg] = {"idx": len(deps_info),
|
| + "pkgdir": pkgdir,
|
| + "pkgname": pkgname,
|
| + "oldversion": oldversion,
|
| + "uninstall": uninstall}
|
| else:
|
| - # Is this a package that failed to match uor huge regex?
|
| + # Is this a package that failed to match our huge regex?
|
| m = re_failed.match(line)
|
| if m:
|
| print "FAIL: Couldn't understand line:"
|
| print line
|
| sys.exit(1)
|
|
|
| - return deps_tree
|
| + return deps_tree, deps_info
|
|
|
|
|
| def PrintTree(deps, depth=""):
|
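Annotation: the elif m_installed branch above turns such a match into a deps_info record, flagging an unmerge of the old version when the line looks like an upgrade or downgrade. A sketch of that decision using the same hypothetical values as before (field names mirror the hunk; "idx" simply records the order emerge listed the packages in, which SanitizeTree later uses when picking which edge of a cycle to break):

    # An upgrade line carries an old version plus a "U" (or "D" for downgrade)
    # in the bracketed flags, so the old version gets marked for unmerge once
    # the new one is in place.
    desc, oldversion = "ebuild     U  ", "2.1.6"
    uninstall = bool(oldversion and ("U" in desc or "D" in desc))

    deps_info = {}
    deps_info["sys-apps/portage-2.1.7"] = {"idx": len(deps_info),   # emerge's listing order
                                           "pkgdir": "sys-apps",
                                           "pkgname": "portage",
                                           "oldversion": oldversion,
                                           "uninstall": uninstall}  # True here
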
| @@ -307,11 +321,12 @@ def PrintTree(deps, depth=""):
|
| PrintTree(deps[entry]["deps"], depth=depth + " ")
|
|
|
|
|
| -def GenDependencyGraph(deps_tree):
|
| +def GenDependencyGraph(deps_tree, deps_info):
|
| """Generate a doubly linked dependency graph.
|
|
|
| Args:
|
| deps_tree: dependency tree structure.
|
| + deps_info: more info on the dependencies.
|
| Returns:
|
| Deps graph in the form of a dict of packages, with each package
|
| specifying a "needs" list and "provides" list.
|
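Annotation: with "needs" switching from a dict keyed by deptype to a plain set, the graph ReverseTree() builds is just two mirrored edge sets per package. A toy example of the resulting shape and its invariant (package names invented, not real emerge data):

    deps_map = {
        "app/a": {"action": "merge", "needs": set(["lib/b"]), "provides": set()},
        "lib/b": {"action": "merge", "needs": set(),          "provides": set(["app/a"])},
    }
    # Invariant maintained by ReverseTree(): the two edge sets mirror each other.
    for pkg, info in deps_map.items():
      for dep in info["needs"]:
        assert pkg in deps_map[dep]["provides"]
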
| @@ -331,48 +346,78 @@ def GenDependencyGraph(deps_tree):
|
| for pkg in packages:
|
| action = packages[pkg]["action"]
|
| this_pkg = deps_map.setdefault(
|
| - pkg, {"needs": {}, "provides": set(), "action": "nomerge"})
|
| + pkg, {"needs": set(), "provides": set(), "action": "nomerge"})
|
| if action != "nomerge":
|
| this_pkg["action"] = action
|
| + this_pkg["deps_info"] = deps_info.get(pkg)
|
| ReverseTree(packages[pkg]["deps"])
|
| for dep, dep_item in packages[pkg]["deps"].items():
|
| dep_pkg = deps_map[dep]
|
| dep_type = dep_item["deptype"]
|
| - if dep_type == "(runtime_post)":
|
| - dep_pkg["needs"][pkg] = dep_type
|
| - this_pkg["provides"].add(dep)
|
| - else:
|
| + if dep_type != "(runtime_post)":
|
| dep_pkg["provides"].add(pkg)
|
| - this_pkg["needs"][dep] = dep_type
|
| + this_pkg["needs"].add(dep)
|
| +
|
| + def RemoveInstalledPackages():
|
| + """Remove installed packages, propagating dependencies"""
|
| +
|
| + rm_pkgs = set(deps_map.keys()) - set(deps_info.keys())
|
| + for pkg in rm_pkgs:
|
| + this_pkg = deps_map[pkg]
|
| + needs = this_pkg["needs"]
|
| + provides = this_pkg["provides"]
|
| + for dep in needs:
|
| + dep_provides = deps_map[dep]["provides"]
|
| + dep_provides.update(provides)
|
| + dep_provides.discard(pkg)
|
| + dep_provides.discard(dep)
|
| + for target in provides:
|
| + target_needs = deps_map[target]["needs"]
|
| + target_needs.update(needs)
|
| + target_needs.discard(pkg)
|
| + target_needs.discard(target)
|
| + del deps_map[pkg]
|
| +
|
|
|
| def SanitizeDep(basedep, currdep, oldstack, limit):
|
| - """Remove any circular dependencies between basedep, currdep, then recurse.
|
| + """Search for circular deps between basedep and currdep, then recurse.
|
|
|
| Args:
|
| basedep: original dependency, top of stack.
|
| currdep: bottom of our current recursion, bottom of stack.
|
| oldstack: current dependency chain.
|
| limit: how many more levels of recursion to go through, max.
|
| - TODO(): Break PDEPEND preferentially, then RDEPEND. Also extract emerge
|
| - linear ordering and break cycles on default emerge linear order.
|
| + TODO(): Break RDEPEND preferentially.
|
| + Returns:
|
| + True iff circular dependencies are found.
|
| """
|
| if limit == 0:
|
| return
|
| for dep in deps_map[currdep]["needs"]:
|
| stack = oldstack + [dep]
|
| - if basedep in deps_map[dep]["needs"]:
|
| + if basedep in deps_map[dep]["needs"] or dep == basedep:
|
| + if dep != basedep:
|
| + stack += [basedep]
|
| print "Remove cyclic dependency from:"
|
| for i in xrange(0, len(stack) - 1):
|
| - print " %s (%s)-> %s " % (
|
| - stack[i], deps_map[stack[i]]["needs"][stack[i+1]], stack[i+1])
|
| - del deps_map[dep]["needs"][basedep]
|
| - deps_map[basedep]["provides"].remove(dep)
|
| - SanitizeDep(basedep, dep, stack, limit - 1)
|
| + print " %s -> %s " % (stack[i], stack[i+1])
|
| + return True
|
| + if dep not in oldstack and SanitizeDep(basedep, dep, stack, limit - 1):
|
| + return True
|
| + return
|
|
|
| def SanitizeTree():
|
| - """Remove circular dependencies up to cycle length 8."""
|
| - for dep in deps_map:
|
| - SanitizeDep(dep, dep, [dep], 8)
|
| + """Remove circular dependencies up to cycle length 32."""
|
| + start = time.time()
|
| + for basedep in deps_map:
|
| + for dep in deps_map[basedep]["needs"].copy():
|
| + if deps_info[basedep]["idx"] <= deps_info[dep]["idx"]:
|
| + if SanitizeDep(basedep, dep, [basedep, dep], 31):
|
| + print "Breaking", basedep, " -> ", dep
|
| + deps_map[basedep]["needs"].remove(dep)
|
| + deps_map[dep]["provides"].remove(basedep)
|
| + seconds = time.time() - start
|
| + print "Tree sanitized in %d:%04.1fs" % (seconds / 60, seconds % 60)
|
|
|
| def AddSecretDeps():
|
| """Find these tagged packages and add extra dependencies.
|
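Annotation: RemoveInstalledPackages() drops every package emerge is not actually going to merge while preserving the ordering constraints it carried, i.e. it contracts the node and reconnects its neighbours. A self-contained toy of that splice (names invented; the body mirrors the helper in the hunk above):

    def ContractNode(deps_map, pkg):
      """Drop pkg from the graph, reconnecting its neighbours (toy version)."""
      needs = deps_map[pkg]["needs"]
      provides = deps_map[pkg]["provides"]
      for dep in needs:
        dep_provides = deps_map[dep]["provides"]
        dep_provides.update(provides)
        dep_provides.discard(pkg)
        dep_provides.discard(dep)
      for target in provides:
        target_needs = deps_map[target]["needs"]
        target_needs.update(needs)
        target_needs.discard(pkg)
        target_needs.discard(target)
      del deps_map[pkg]

    # "mid" is already installed, so "top" should end up needing "bottom" directly.
    deps_map = {
        "top":    {"needs": set(["mid"]),    "provides": set()},
        "mid":    {"needs": set(["bottom"]), "provides": set(["top"])},
        "bottom": {"needs": set(),           "provides": set(["mid"])},
    }
    ContractNode(deps_map, "mid")
    print deps_map["top"]["needs"]        # set(['bottom'])
    print deps_map["bottom"]["provides"]  # set(['top'])
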
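Annotation: SanitizeTree()/SanitizeDep() then hunt for cycles with a depth-bounded DFS over "needs" edges and, when one is found, drop the edge they started from (the deps_info ordering check decides which direction to try first). A stripped-down sketch of just the bounded cycle test, without the provides bookkeeping (HasCycle and the toy graph are hypothetical):

    def HasCycle(deps_map, basedep, currdep, seen, limit):
      """Bounded DFS: is there a path of "needs" edges from currdep back to basedep?"""
      if limit == 0:
        return False
      for dep in deps_map[currdep]["needs"]:
        if dep == basedep:
          return True
        if dep not in seen and HasCycle(deps_map, basedep, dep, seen + [dep], limit - 1):
          return True
      return False

    # Toy cycle a -> b -> a ("needs" edges only).
    deps_map = {"a": {"needs": set(["b"])}, "b": {"needs": set(["a"])}}
    if HasCycle(deps_map, "a", "b", ["a", "b"], 31):
      deps_map["a"]["needs"].remove("b")  # break the edge we started from
    print deps_map["a"]["needs"]          # set([])
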
| @@ -390,10 +435,11 @@ def GenDependencyGraph(deps_tree):
|
| needed_pkg = dep
|
| if bad_pkg and needed_pkg:
|
| deps_map[needed_pkg]["provides"].add(bad_pkg)
|
| - deps_map[bad_pkg]["needs"][needed_pkg] = "(manually forced)"
|
| + deps_map[bad_pkg]["needs"].add(needed_pkg)
|
|
|
| ReverseTree(deps_tree)
|
| AddSecretDeps()
|
| + RemoveInstalledPackages()
|
| SanitizeTree()
|
| return deps_map
|
|
|
| @@ -402,8 +448,8 @@ def PrintDepsMap(deps_map):
|
| """Print dependency graph, for each package list it's prerequisites."""
|
| for i in deps_map:
|
| print "%s: (%s) needs" % (i, deps_map[i]["action"])
|
| - for j, dep_type in deps_map[i]["needs"].items():
|
| - print " %s ( %s )" % (j, dep_type)
|
| + for j in deps_map[i]["needs"]:
|
| + print " %s" % (j)
|
|
|
|
|
| class EmergeQueue(object):
|
| @@ -426,9 +472,9 @@ class EmergeQueue(object):
|
|
|
| def _Status(self):
|
| """Print status."""
|
| - print "Pending %s, Ready %s, Running %s, Failed %s, Total %s" % (
|
| + print "Pending %s, Ready %s, Running %s, Retrying %s, Total %s" % (
|
| len(self._deps_map), len(self._emerge_queue),
|
| - len(self._jobs), len(self._failed), self._total_jobs)
|
| + len(self._jobs), len(self._retry_queue), self._total_jobs)
|
|
|
| def _LaunchOneEmerge(self, target):
|
| """Run emerge --nodeps to do a single package install.
|
| @@ -455,14 +501,19 @@ class EmergeQueue(object):
|
| # in the "world" file, which represents explicit intalls.
|
| # "--oneshot" here will prevent it from being tagged in world.
|
| cmdline = EmergeCommand() + " --nodeps --oneshot =" + target
|
| - if VERBOSE:
|
| - print "running %s" % cmdline
|
| + deps_info = self._deps_map[target]["deps_info"]
|
| + if deps_info["uninstall"]:
|
| + package = "%(pkgdir)s/%(pkgname)s-%(oldversion)s" % deps_info
|
| + cmdline += " && %s -1C =%s" % (EmergeCommand(), package)
|
| +
|
| + print "+ %s" % cmdline
|
|
|
| # Store output in a temp file as it is too big for a unix pipe.
|
| stdout_buffer = tempfile.TemporaryFile()
|
| # Modify the environment to disable locking.
|
| portage_env = os.environ.copy()
|
| portage_env["PORTAGE_LOCKS"] = "false"
|
| + portage_env["UNMERGE_DELAY"] = "0"
|
| # Autoclean rummages around in the portage database and uninstalls
|
| # old packages. Definitely not necessary for build_image. However
|
| # it may be necessary for incremental build_packages. It may also
|
| @@ -471,7 +522,7 @@ class EmergeQueue(object):
|
| portage_env["AUTOCLEAN"] = "no"
|
| # Launch the subprocess.
|
| emerge_proc = subprocess.Popen(
|
| - shlex.split(cmdline), stdout=stdout_buffer,
|
| + cmdline, shell=True, stdout=stdout_buffer,
|
| stderr=subprocess.STDOUT, bufsize=64*1024, env=portage_env)
|
|
|
| return (target, emerge_proc, stdout_buffer)
|
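Annotation: because the uninstall case now chains two emerges with "&&", the command line is handed to a shell as a single string (shell=True) instead of being shlex.split() into one argv. A harmless stand-in showing the same pattern, including a couple of the environment tweaks from this code:

    import os
    import subprocess
    import tempfile

    # Two commands joined with "&&" only make sense to a shell, so the string
    # is passed as-is with shell=True.
    cmdline = "true && echo second-step-ran"   # stand-in for the chained emerges
    out = tempfile.TemporaryFile()
    env = os.environ.copy()
    env["PORTAGE_LOCKS"] = "false"
    env["AUTOCLEAN"] = "no"
    proc = subprocess.Popen(cmdline, shell=True, stdout=out,
                            stderr=subprocess.STDOUT, bufsize=64*1024, env=env)
    proc.wait()
    out.seek(0)
    print out.read()  # prints "second-step-ran"
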
| @@ -479,7 +530,7 @@ class EmergeQueue(object):
|
| def _Finish(self, target):
|
| """Mark a target as completed and unblock dependecies."""
|
| for dep in self._deps_map[target]["provides"]:
|
| - del self._deps_map[dep]["needs"][target]
|
| + self._deps_map[dep]["needs"].remove(target)
|
| if not self._deps_map[dep]["needs"]:
|
| if VERBOSE:
|
| print "Unblocking %s" % dep
|
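Annotation: with "needs" as a set, _Finish() is the classic ready-list step of a topological sort: remove the finished target from every dependent's set and hand over any dependent whose set just emptied. A minimal sketch (Finish and the ready list are hypothetical simplifications of the real queue handling):

    def Finish(deps_map, ready, target):
      """Mark target done; queue any dependent whose last prerequisite it was."""
      for dep in deps_map[target]["provides"]:
        deps_map[dep]["needs"].remove(target)
        if not deps_map[dep]["needs"]:
          ready.append(dep)

    deps_map = {
        "lib/b": {"needs": set(),          "provides": set(["app/a"])},
        "app/a": {"needs": set(["lib/b"]), "provides": set()},
    }
    ready = []
    Finish(deps_map, ready, "lib/b")
    print ready  # ['app/a']
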
| @@ -591,13 +642,13 @@ print " Building package %s on %s (%s)" % (PACKAGE, EMERGE_ARGS, BOARD)
|
| print "Running emerge to generate deps"
|
| deps_output = GetDepsFromPortage(PACKAGE)
|
| print "Processing emerge output"
|
| -dependency_tree = DepsToTree(deps_output)
|
| +dependency_tree, dependency_info = DepsToTree(deps_output)
|
| if VERBOSE:
|
| print "Print tree"
|
| PrintTree(dependency_tree)
|
|
|
| print "Generate dependency graph."
|
| -dependency_graph = GenDependencyGraph(dependency_tree)
|
| +dependency_graph = GenDependencyGraph(dependency_tree, dependency_info)
|
|
|
| if VERBOSE:
|
| PrintDepsMap(dependency_graph)
|
|
|