| Index: sys-apps/portage/files/portage-2.1.9.45-mergeprocess.patch
|
| diff --git a/sys-apps/portage/files/portage-2.1.9.45-mergeprocess.patch b/sys-apps/portage/files/portage-2.1.9.45-mergeprocess.patch
|
| new file mode 100644
|
| index 0000000000000000000000000000000000000000..95b80e881c3a813dfcc7960ad469ee43689fce97
|
| --- /dev/null
|
| +++ b/sys-apps/portage/files/portage-2.1.9.45-mergeprocess.patch
|
| @@ -0,0 +1,1307 @@
|
| +commit 0f5b0e1b77208771dfedc43646636597f74abe49
|
| +Author: David James <davidjames@google.com>
|
| +Date: Thu Mar 24 19:36:33 2011 -0700
|
| +
|
| + Merge packages asynchronously in Portage.
|
| +
|
| + This allows for the scheduler to continue to run while packages are
|
| + being merged and installed, allowing for additional parallelism and
|
| + making better use of the CPUs.
|
| +
|
| + Review URL: http://codereview.chromium.org/6713043
|
| + (cherry picked from commit 7535cabdf2fab76fc55df83643157613dfd66be9)
|
| +
|
| + Preserve elog message continuity during updates.
|
| +
|
| + This integrates the fix from commit
|
| + 8209aeab647b1ab80a64d5931069b3533776ef75 with the asynchronous merge
|
| + changes from commit 7535cabdf2fab76fc55df83643157613dfd66be9.
|
| + (cherry picked from commit f823031ed33bda9579d265b62607380bb255dfdd)
|
| +
|
| + MergeProcess: Fix PORTAGE_BACKGROUND/LOG_FILE use
|
| +
|
| + In this subprocess we don't want PORTAGE_BACKGROUND to suppress
|
| + stdout/stderr output since they are pipes. We also don't want to open
|
| + PORTAGE_LOG_FILE, since it will already be opened by the parent
|
| + process, so we set the PORTAGE_BACKGROUND="subprocess" value for use
|
| + in conditional logging code involving PORTAGE_LOG_FILE.
|
| + (cherry picked from commit 3081e651fc3cd3a0729bb1fbe2e93fbc58dcef0d)
|
| +
|
| + MergeProcess: close elog_writer_fd leak
|
| + (cherry picked from commit 981f27f79e607877e7b8e47a904e3972d0e8336e)
|
| +
|
| + ebuild(1): fix AttributeError for merge phase
|
| +
|
| + File "pym/portage/dbapi/vartree.py", line 4043, in merge
|
| + merge_task.start()
|
| + File "pym/_emerge/AsynchronousTask.py", line 23, in start
|
| + self._start()
|
| + File "pym/_emerge/SpawnProcess.py", line 118, in _start
|
| + self._reg_id = self.scheduler.register(files.process.fileno(),
|
| + AttributeError: 'NoneType' object has no attribute 'register'
|
| + (cherry picked from commit f3c4a464cc38788a4946de5331c2618c183fccac)
|
| +
|
| + MergeProcess: separate unmerge output from merge
|
| +
|
| + The unmerge output has been mixed together with the merge output since
|
| + commit 7535cabdf2fab76fc55df83643157613dfd66be9 because
|
| + dblink._scheduler was set to None. Now it's fixed to produce separate
|
| + logs like it used to.
|
| + (cherry picked from commit 2fd76b639d44f3ff3624ed8dbe96d214a42875e5)
|
| +
|
| + vartree: remove unused scheduler references
|
| +
|
| + Since all the merge code runs inside MergeProcess now, there's no
|
| + reason to yield to the scheduler.
|
| + (cherry picked from commit 7ffa0683cd9c40e630488af5783c549bee5cd3c8)
|
| +
|
| + dblink: fix elog bugs for unmerge phases
|
| + (cherry picked from commit fe6a9433426d284aabdf774376082fbed1741478)
|
| +
|
| + MergeProcess: handle unicode in elog pipe
|
| + (cherry picked from commit 176e2c74b368c73b593553773e749b3f25ddad72)
|
| +
|
| + vartree: remove broken scheduler reference
|
| + (cherry picked from commit 0c4d01435737ba71d9c628ee34849c36ecec140b)
|
| +
|
| + MergeProcess: toggle vardbapi._pkgs_changed
|
| +
|
| + dblink: use self.mycpv in _elog
|
| +
|
| + MergeProcess: call elog_process for replaced pkgs
|
| +
|
| + PackageMerge: call wait() for conformity
|
| +
|
| + Reorganize PackageMerge/MergeListItem interaction.
|
| +
|
| + unmerge: fix PORTAGE_BACKGROUND logic
|
| +
|
| + MergeProcess: relocate portage reinstall code
|
| +
|
| + This code goes inside _start since it needs to execute in the parent
|
| + process.
|
| +
|
| + MergeProcess: query blockers in the main process
|
| +
|
| + Metadata cache queries may not work for some databases from within a
|
| + subprocess. For example, sqlite is known to misbehave.
|
| +
|
| + MergeProcess: add fallback setcpv call
|
| +
|
| + It's important that this metadata access happens in the parent process,
|
| + since closing of file descriptors in the subprocess can prevent access
|
| + to open database connections such as that used by the sqlite metadata
|
| + cache module.
|
| +
|
| + dblink: rename 'buffer' var in _elog_process
|
| +
|
| + Avoid name collision with built-in 'buffer' function.
|
| +
|
| +diff --git a/pym/_emerge/AbstractEbuildProcess.py b/pym/_emerge/AbstractEbuildProcess.py
|
| +index d7f31be..39c613b 100644
|
| +--- a/pym/_emerge/AbstractEbuildProcess.py
|
| ++++ b/pym/_emerge/AbstractEbuildProcess.py
|
| +@@ -225,8 +225,10 @@ class AbstractEbuildProcess(SpawnProcess):
|
| + msg = _unicode_decode(out.getvalue(),
|
| + encoding=_encodings['content'], errors='replace')
|
| + if msg:
|
| +- self.scheduler.output(msg,
|
| +- log_path=self.settings.get("PORTAGE_LOG_FILE"))
|
| ++ log_path = None
|
| ++ if self.settings.get("PORTAGE_BACKGROUND") != "subprocess":
|
| ++ log_path = self.settings.get("PORTAGE_LOG_FILE")
|
| ++ self.scheduler.output(msg, log_path=log_path)
|
| +
|
| + def _log_poll_exception(self, event):
|
| + self._elog("eerror",
|
| +diff --git a/pym/_emerge/AsynchronousTask.py b/pym/_emerge/AsynchronousTask.py
|
| +index de00800..2b654ec 100644
|
| +--- a/pym/_emerge/AsynchronousTask.py
|
| ++++ b/pym/_emerge/AsynchronousTask.py
|
| +@@ -1,6 +1,7 @@
|
| + # Copyright 1999-2011 Gentoo Foundation
|
| + # Distributed under the terms of the GNU General Public License v2
|
| +
|
| ++from portage import os
|
| + from _emerge.SlotObject import SlotObject
|
| + class AsynchronousTask(SlotObject):
|
| + """
|
| +@@ -23,7 +24,8 @@ class AsynchronousTask(SlotObject):
|
| + self._start()
|
| +
|
| + def _start(self):
|
| +- raise NotImplementedError(self)
|
| ++ self.returncode = os.EX_OK
|
| ++ self.wait()
|
| +
|
| + def isAlive(self):
|
| + return self.returncode is None
|
| +diff --git a/pym/_emerge/Binpkg.py b/pym/_emerge/Binpkg.py
|
| +index 0058745..bc6b85d 100644
|
| +--- a/pym/_emerge/Binpkg.py
|
| ++++ b/pym/_emerge/Binpkg.py
|
| +@@ -307,7 +307,7 @@ class Binpkg(CompositeTask):
|
| + portage.elog.elog_process(self.pkg.cpv, self.settings)
|
| + self._build_dir.unlock()
|
| +
|
| +- def install(self):
|
| ++ def create_install_task(self):
|
| +
|
| + # This gives bashrc users an opportunity to do various things
|
| + # such as remove binary packages after they're installed.
|
| +@@ -320,19 +320,17 @@ class Binpkg(CompositeTask):
|
| + pkg=self.pkg, pkg_count=self.pkg_count,
|
| + pkg_path=self._pkg_path, scheduler=self.scheduler,
|
| + settings=settings, tree=self._tree, world_atom=self.world_atom)
|
| +-
|
| +- try:
|
| +- retval = merge.execute()
|
| +- finally:
|
| +- settings.pop("PORTAGE_BINPKG_FILE", None)
|
| +- self._unlock_builddir()
|
| +-
|
| +- if retval == os.EX_OK and \
|
| ++ task = merge.create_task()
|
| ++ task.addExitListener(self._install_exit)
|
| ++ return task
|
| ++
|
| ++ def _install_exit(self, task):
|
| ++ self.settings.pop("PORTAGE_BINPKG_FILE", None)
|
| ++ self._unlock_builddir()
|
| ++ if task.returncode == os.EX_OK and \
|
| + 'binpkg-logs' not in self.settings.features and \
|
| + self.settings.get("PORTAGE_LOG_FILE"):
|
| + try:
|
| + os.unlink(self.settings["PORTAGE_LOG_FILE"])
|
| + except OSError:
|
| + pass
|
| +- return retval
|
| +-
|
| +diff --git a/pym/_emerge/EbuildBuild.py b/pym/_emerge/EbuildBuild.py
|
| +index 98f5a2b..c33153b 100644
|
| +--- a/pym/_emerge/EbuildBuild.py
|
| ++++ b/pym/_emerge/EbuildBuild.py
|
| +@@ -314,7 +314,7 @@ class EbuildBuild(CompositeTask):
|
| + self._unlock_builddir()
|
| + self.wait()
|
| +
|
| +- def install(self):
|
| ++ def create_install_task(self):
|
| + """
|
| + Install the package and then clean up and release locks.
|
| + Only call this after the build has completed successfully
|
| +@@ -343,10 +343,9 @@ class EbuildBuild(CompositeTask):
|
| + (pkg_count.curval, pkg_count.maxval, pkg.cpv)
|
| + logger.log(msg, short_msg=short_msg)
|
| +
|
| +- try:
|
| +- rval = merge.execute()
|
| +- finally:
|
| +- self._unlock_builddir()
|
| +-
|
| +- return rval
|
| ++ task = merge.create_task()
|
| ++ task.addExitListener(self._install_exit)
|
| ++ return task
|
| +
|
| ++ def _install_exit(self, task):
|
| ++ self._unlock_builddir()
|
| +diff --git a/pym/_emerge/EbuildMerge.py b/pym/_emerge/EbuildMerge.py
|
| +index d73a262..6a58692 100644
|
| +--- a/pym/_emerge/EbuildMerge.py
|
| ++++ b/pym/_emerge/EbuildMerge.py
|
| +@@ -4,6 +4,8 @@
|
| + from _emerge.SlotObject import SlotObject
|
| + import portage
|
| + from portage import os
|
| ++from portage.dbapi._MergeProcess import MergeProcess
|
| ++from portage.dbapi.vartree import dblink
|
| +
|
| + class EbuildMerge(SlotObject):
|
| +
|
| +@@ -11,28 +13,35 @@ class EbuildMerge(SlotObject):
|
| + "pkg", "pkg_count", "pkg_path", "pretend",
|
| + "scheduler", "settings", "tree", "world_atom")
|
| +
|
| +- def execute(self):
|
| ++ def create_task(self):
|
| + root_config = self.pkg.root_config
|
| + settings = self.settings
|
| +- retval = portage.merge(settings["CATEGORY"],
|
| +- settings["PF"], settings["D"],
|
| +- os.path.join(settings["PORTAGE_BUILDDIR"],
|
| +- "build-info"), root_config.root, settings,
|
| +- myebuild=settings["EBUILD"],
|
| +- mytree=self.tree, mydbapi=root_config.trees[self.tree].dbapi,
|
| +- vartree=root_config.trees["vartree"],
|
| +- prev_mtimes=self.ldpath_mtimes,
|
| +- scheduler=self.scheduler,
|
| +- blockers=self.find_blockers)
|
| +-
|
| +- if retval == os.EX_OK:
|
| +- self.world_atom(self.pkg)
|
| +- self._log_success()
|
| +-
|
| +- return retval
|
| +-
|
| +- def _log_success(self):
|
| ++ mycat = settings["CATEGORY"]
|
| ++ mypkg = settings["PF"]
|
| ++ pkgloc = settings["D"]
|
| ++ infloc = os.path.join(settings["PORTAGE_BUILDDIR"], "build-info")
|
| ++ myroot = root_config.root
|
| ++ myebuild = settings["EBUILD"]
|
| ++ mydbapi = root_config.trees[self.tree].dbapi
|
| ++ vartree = root_config.trees["vartree"]
|
| ++ background = (settings.get('PORTAGE_BACKGROUND') == '1')
|
| ++ logfile = settings.get('PORTAGE_LOG_FILE')
|
| ++
|
| ++ merge_task = MergeProcess(
|
| ++ dblink=dblink, mycat=mycat, mypkg=mypkg, settings=settings,
|
| ++ treetype=self.tree, vartree=vartree, scheduler=self.scheduler,
|
| ++ background=background, blockers=self.find_blockers, pkgloc=pkgloc,
|
| ++ infloc=infloc, myebuild=myebuild, mydbapi=mydbapi,
|
| ++ prev_mtimes=self.ldpath_mtimes, logfile=logfile)
|
| ++ merge_task.addExitListener(self._log_exit)
|
| ++ return merge_task
|
| ++
|
| ++ def _log_exit(self, task):
|
| ++ if task.returncode != os.EX_OK:
|
| ++ return
|
| ++
|
| + pkg = self.pkg
|
| ++ self.world_atom(pkg)
|
| + pkg_count = self.pkg_count
|
| + pkg_path = self.pkg_path
|
| + logger = self.logger
|
| +diff --git a/pym/_emerge/EbuildPhase.py b/pym/_emerge/EbuildPhase.py
|
| +index e3270c8..a24608b 100644
|
| +--- a/pym/_emerge/EbuildPhase.py
|
| ++++ b/pym/_emerge/EbuildPhase.py
|
| +@@ -121,9 +121,10 @@ class EbuildPhase(CompositeTask):
|
| + # Don't open the log file during the clean phase since the
|
| + # open file can result in an nfs lock on $T/build.log which
|
| + # prevents the clean phase from removing $T.
|
| +- logfile = self.settings.get("PORTAGE_LOG_FILE")
|
| +- if self.phase in ("clean", "cleanrm"):
|
| +- logfile = None
|
| ++ logfile = None
|
| ++ if self.phase not in ("clean", "cleanrm") and \
|
| ++ self.settings.get("PORTAGE_BACKGROUND") != "subprocess":
|
| ++ logfile = self.settings.get("PORTAGE_LOG_FILE")
|
| +
|
| + fd_pipes = None
|
| + if not self.background and self.phase == 'nofetch':
|
| +@@ -151,13 +152,16 @@ class EbuildPhase(CompositeTask):
|
| + if not fail:
|
| + self.returncode = None
|
| +
|
| ++ logfile = None
|
| ++ if self.settings.get("PORTAGE_BACKGROUND") != "subprocess":
|
| ++ logfile = self.settings.get("PORTAGE_LOG_FILE")
|
| ++
|
| + if self.phase == "install":
|
| + out = portage.StringIO()
|
| + _check_build_log(self.settings, out=out)
|
| + msg = _unicode_decode(out.getvalue(),
|
| + encoding=_encodings['content'], errors='replace')
|
| +- self.scheduler.output(msg,
|
| +- log_path=self.settings.get("PORTAGE_LOG_FILE"))
|
| ++ self.scheduler.output(msg, log_path=logfile)
|
| +
|
| + if fail:
|
| + self._die_hooks()
|
| +@@ -173,12 +177,10 @@ class EbuildPhase(CompositeTask):
|
| + msg = _unicode_decode(out.getvalue(),
|
| + encoding=_encodings['content'], errors='replace')
|
| + if msg:
|
| +- self.scheduler.output(msg,
|
| +- log_path=self.settings.get("PORTAGE_LOG_FILE"))
|
| ++ self.scheduler.output(msg, log_path=logfile)
|
| +
|
| + post_phase_cmds = _post_phase_cmds.get(self.phase)
|
| + if post_phase_cmds is not None:
|
| +- logfile = settings.get("PORTAGE_LOG_FILE")
|
| + if logfile is not None and self.phase in ("install",):
|
| + # Log to a temporary file, since the code we are running
|
| + # reads PORTAGE_LOG_FILE for QA checks, and we want to
|
| +@@ -204,7 +206,10 @@ class EbuildPhase(CompositeTask):
|
| +
|
| + self._assert_current(post_phase)
|
| +
|
| +- log_path = self.settings.get("PORTAGE_LOG_FILE")
|
| ++ log_path = None
|
| ++ if self.settings.get("PORTAGE_BACKGROUND") != "subprocess":
|
| ++ log_path = self.settings.get("PORTAGE_LOG_FILE")
|
| ++
|
| + if post_phase.logfile is not None and \
|
| + post_phase.logfile != log_path:
|
| + # We were logging to a temp file (see above), so append
|
| +@@ -293,5 +298,7 @@ class EbuildPhase(CompositeTask):
|
| + msg = _unicode_decode(out.getvalue(),
|
| + encoding=_encodings['content'], errors='replace')
|
| + if msg:
|
| +- self.scheduler.output(msg,
|
| +- log_path=self.settings.get("PORTAGE_LOG_FILE"))
|
| ++ log_path = None
|
| ++ if self.settings.get("PORTAGE_BACKGROUND") != "subprocess":
|
| ++ log_path = self.settings.get("PORTAGE_LOG_FILE")
|
| ++ self.scheduler.output(msg, log_path=log_path)
|
| +diff --git a/pym/_emerge/MergeListItem.py b/pym/_emerge/MergeListItem.py
|
| +index 3563f13..98a402e 100644
|
| +--- a/pym/_emerge/MergeListItem.py
|
| ++++ b/pym/_emerge/MergeListItem.py
|
| +@@ -4,6 +4,7 @@
|
| + from portage import os
|
| + from portage.output import colorize
|
| +
|
| ++from _emerge.AsynchronousTask import AsynchronousTask
|
| + from _emerge.Binpkg import Binpkg
|
| + from _emerge.CompositeTask import CompositeTask
|
| + from _emerge.EbuildBuild import EbuildBuild
|
| +@@ -103,15 +104,12 @@ class MergeListItem(CompositeTask):
|
| + self._start_task(binpkg, self._default_final_exit)
|
| + return
|
| +
|
| +- def merge(self):
|
| ++
|
| ++ def create_install_task(self):
|
| +
|
| + pkg = self.pkg
|
| + build_opts = self.build_opts
|
| +- find_blockers = self.find_blockers
|
| +- logger = self.logger
|
| + mtimedb = self.mtimedb
|
| +- pkg_count = self.pkg_count
|
| +- prefetcher = self.prefetcher
|
| + scheduler = self.scheduler
|
| + settings = self.settings
|
| + world_atom = self.world_atom
|
| +@@ -121,21 +119,18 @@ class MergeListItem(CompositeTask):
|
| + if not (build_opts.buildpkgonly or \
|
| + build_opts.fetchonly or build_opts.pretend):
|
| +
|
| +- uninstall = PackageUninstall(background=self.background,
|
| ++ task = PackageUninstall(background=self.background,
|
| + ldpath_mtimes=ldpath_mtimes, opts=self.emerge_opts,
|
| + pkg=pkg, scheduler=scheduler, settings=settings,
|
| + world_atom=world_atom)
|
| +
|
| +- uninstall.start()
|
| +- retval = uninstall.wait()
|
| +- if retval != os.EX_OK:
|
| +- return retval
|
| +- return os.EX_OK
|
| ++ else:
|
| ++ task = AsynchronousTask()
|
| +
|
| +- if build_opts.fetchonly or \
|
| ++ elif build_opts.fetchonly or \
|
| + build_opts.buildpkgonly:
|
| +- return self.returncode
|
| +-
|
| +- retval = self._install_task.install()
|
| +- return retval
|
| ++ task = AsynchronousTask()
|
| ++ else:
|
| ++ task = self._install_task.create_install_task()
|
| +
|
| ++ return task
|
| +diff --git a/pym/_emerge/MiscFunctionsProcess.py b/pym/_emerge/MiscFunctionsProcess.py
|
| +index ad8cefc..e6bb103 100644
|
| +--- a/pym/_emerge/MiscFunctionsProcess.py
|
| ++++ b/pym/_emerge/MiscFunctionsProcess.py
|
| +@@ -22,7 +22,8 @@ class MiscFunctionsProcess(AbstractEbuildProcess):
|
| + os.path.basename(portage.const.MISC_SH_BINARY))
|
| +
|
| + self.args = [portage._shell_quote(misc_sh_binary)] + self.commands
|
| +- if self.logfile is None:
|
| ++ if self.logfile is None and \
|
| ++ self.settings.get("PORTAGE_BACKGROUND") != "subprocess":
|
| + self.logfile = settings.get("PORTAGE_LOG_FILE")
|
| +
|
| + AbstractEbuildProcess._start(self)
|
| +diff --git a/pym/_emerge/PackageMerge.py b/pym/_emerge/PackageMerge.py
|
| +index 4aecf8a..8ce06b1 100644
|
| +--- a/pym/_emerge/PackageMerge.py
|
| ++++ b/pym/_emerge/PackageMerge.py
|
| +@@ -1,18 +1,14 @@
|
| + # Copyright 1999-2009 Gentoo Foundation
|
| + # Distributed under the terms of the GNU General Public License v2
|
| +
|
| +-from _emerge.AsynchronousTask import AsynchronousTask
|
| ++from _emerge.CompositeTask import CompositeTask
|
| + from portage.output import colorize
|
| +-class PackageMerge(AsynchronousTask):
|
| +- """
|
| +- TODO: Implement asynchronous merge so that the scheduler can
|
| +- run while a merge is executing.
|
| +- """
|
| +-
|
| ++class PackageMerge(CompositeTask):
|
| + __slots__ = ("merge",)
|
| +
|
| + def _start(self):
|
| +
|
| ++ self.scheduler = self.merge.scheduler
|
| + pkg = self.merge.pkg
|
| + pkg_count = self.merge.pkg_count
|
| +
|
| +@@ -40,6 +36,5 @@ class PackageMerge(AsynchronousTask):
|
| + not self.merge.build_opts.buildpkgonly:
|
| + self.merge.statusMessage(msg)
|
| +
|
| +- self.returncode = self.merge.merge()
|
| +- self.wait()
|
| +-
|
| ++ task = self.merge.create_install_task()
|
| ++ self._start_task(task, self._default_final_exit)
|
| +diff --git a/pym/portage/dbapi/_MergeProcess.py b/pym/portage/dbapi/_MergeProcess.py
|
| +index f717d12..afb2e08 100644
|
| +--- a/pym/portage/dbapi/_MergeProcess.py
|
| ++++ b/pym/portage/dbapi/_MergeProcess.py
|
| +@@ -1,33 +1,166 @@
|
| + # Copyright 2010-2011 Gentoo Foundation
|
| + # Distributed under the terms of the GNU General Public License v2
|
| +
|
| ++import shutil
|
| + import signal
|
| ++import tempfile
|
| + import traceback
|
| +
|
| ++import errno
|
| ++import fcntl
|
| + import portage
|
| +-from portage import os
|
| ++from portage import os, StringIO, _unicode_decode
|
| ++from portage.const import PORTAGE_PACKAGE_ATOM
|
| ++from portage.dep import match_from_list
|
| ++import portage.elog.messages
|
| ++from portage.elog import _preload_elog_modules
|
| ++from portage.util import ensure_dirs
|
| ++from _emerge.PollConstants import PollConstants
|
| + from _emerge.SpawnProcess import SpawnProcess
|
| +
|
| + class MergeProcess(SpawnProcess):
|
| + """
|
| +- Merge package files in a subprocess, so the Scheduler can run in the
|
| +- main thread while files are moved or copied asynchronously.
|
| ++ Merge packages in a subprocess, so the Scheduler can run in the main
|
| ++ thread while files are moved or copied asynchronously.
|
| + """
|
| +
|
| +- __slots__ = ('cfgfiledict', 'conf_mem_file', \
|
| +- 'destroot', 'dblink', 'srcroot',)
|
| ++ __slots__ = ('dblink', 'mycat', 'mypkg', 'settings', 'treetype',
|
| ++ 'vartree', 'scheduler', 'blockers', 'pkgloc', 'infloc', 'myebuild',
|
| ++ 'mydbapi', 'prev_mtimes', '_elog_reader_fd', '_elog_reg_id',
|
| ++ '_buf', '_elog_keys')
|
| +
|
| +- def _spawn(self, args, fd_pipes=None, **kwargs):
|
| ++ def _start(self):
|
| ++ # Portage should always call setcpv prior to this
|
| ++ # point, but here we have a fallback as a convenience
|
| ++ # for external API consumers. It's important that
|
| ++ # this metadata access happens in the parent process,
|
| ++ # since closing of file descriptors in the subprocess
|
| ++ # can prevent access to open database connections such
|
| ++ # as that used by the sqlite metadata cache module.
|
| ++ cpv = "%s/%s" % (self.mycat, self.mypkg)
|
| ++ settings = self.settings
|
| ++ if cpv != settings.mycpv or \
|
| ++ "IUSE" not in settings.configdict["pkg"]:
|
| ++ settings.reload()
|
| ++ settings.reset()
|
| ++ settings.setcpv(cpv, mydb=self.mydbapi)
|
| ++
|
| ++ self._handle_self_reinstall()
|
| ++ super(MergeProcess, self)._start()
|
| ++
|
| ++ def _handle_self_reinstall(self):
|
| ++ """
|
| ++ If portage is reinstalling itself, create temporary
|
| ++ copies of PORTAGE_BIN_PATH and PORTAGE_PYM_PATH in order
|
| ++ to avoid relying on the new versions which may be
|
| ++ incompatible. Register an atexit hook to clean up the
|
| ++ temporary directories. Pre-load elog modules here since
|
| ++ we won't be able to later if they get unmerged (happens
|
| ++ when namespace changes).
|
| ++ """
|
| ++
|
| ++ settings = self.settings
|
| ++ cpv = settings.mycpv
|
| ++ reinstall_self = False
|
| ++ if self.settings["ROOT"] == "/" and \
|
| ++ match_from_list(PORTAGE_PACKAGE_ATOM, [cpv]):
|
| ++ inherited = frozenset(self.settings.get('INHERITED', '').split())
|
| ++ if not self.vartree.dbapi.cpv_exists(cpv) or \
|
| ++ '9999' in cpv or \
|
| ++ 'git' in inherited or \
|
| ++ 'git-2' in inherited:
|
| ++ reinstall_self = True
|
| ++
|
| ++ if reinstall_self:
|
| ++ # Load lazily referenced portage submodules into memory,
|
| ++ # so imports won't fail during portage upgrade/downgrade.
|
| ++ _preload_elog_modules(self.settings)
|
| ++ portage.proxy.lazyimport._preload_portage_submodules()
|
| ++
|
| ++ # Make the temp directory inside $PORTAGE_TMPDIR/portage, since
|
| ++ # it's common for /tmp and /var/tmp to be mounted with the
|
| ++ # "noexec" option (see bug #346899).
|
| ++ build_prefix = os.path.join(settings["PORTAGE_TMPDIR"], "portage")
|
| ++ ensure_dirs(build_prefix)
|
| ++ base_path_tmp = tempfile.mkdtemp(
|
| ++ "", "._portage_reinstall_.", build_prefix)
|
| ++ portage.process.atexit_register(shutil.rmtree, base_path_tmp)
|
| ++ dir_perms = 0o755
|
| ++ for subdir in "bin", "pym":
|
| ++ var_name = "PORTAGE_%s_PATH" % subdir.upper()
|
| ++ var_orig = settings[var_name]
|
| ++ var_new = os.path.join(base_path_tmp, subdir)
|
| ++ settings[var_name] = var_new
|
| ++ settings.backup_changes(var_name)
|
| ++ shutil.copytree(var_orig, var_new, symlinks=True)
|
| ++ os.chmod(var_new, dir_perms)
|
| ++ portage._bin_path = settings['PORTAGE_BIN_PATH']
|
| ++ portage._pym_path = settings['PORTAGE_PYM_PATH']
|
| ++ os.chmod(base_path_tmp, dir_perms)
|
| ++
|
| ++ def _elog_output_handler(self, fd, event):
|
| ++ output = None
|
| ++ if event & PollConstants.POLLIN:
|
| ++ try:
|
| ++ output = os.read(fd, self._bufsize)
|
| ++ except OSError as e:
|
| ++ if e.errno not in (errno.EAGAIN, errno.EINTR):
|
| ++ raise
|
| ++ if output:
|
| ++ lines = _unicode_decode(output).split('\n')
|
| ++ if len(lines) == 1:
|
| ++ self._buf += lines[0]
|
| ++ else:
|
| ++ lines[0] = self._buf + lines[0]
|
| ++ self._buf = lines.pop()
|
| ++ out = StringIO()
|
| ++ for line in lines:
|
| ++ funcname, phase, key, msg = line.split(' ', 3)
|
| ++ self._elog_keys.add(key)
|
| ++ reporter = getattr(portage.elog.messages, funcname)
|
| ++ reporter(msg, phase=phase, key=key, out=out)
|
| ++
|
| ++ def _spawn(self, args, fd_pipes, **kwargs):
|
| + """
|
| + Fork a subprocess, apply local settings, and call
|
| +- dblink._merge_process().
|
| ++ dblink.merge().
|
| + """
|
| +
|
| ++ elog_reader_fd, elog_writer_fd = os.pipe()
|
| ++ fcntl.fcntl(elog_reader_fd, fcntl.F_SETFL,
|
| ++ fcntl.fcntl(elog_reader_fd, fcntl.F_GETFL) | os.O_NONBLOCK)
|
| ++ blockers = None
|
| ++ if self.blockers is not None:
|
| ++ # Query blockers in the main process, since closing
|
| ++ # of file descriptors in the subprocess can prevent
|
| ++ # access to open database connections such as that
|
| ++ # used by the sqlite metadata cache module.
|
| ++ blockers = self.blockers()
|
| ++ mylink = self.dblink(self.mycat, self.mypkg, settings=self.settings,
|
| ++ treetype=self.treetype, vartree=self.vartree,
|
| ++ blockers=blockers, scheduler=self.scheduler,
|
| ++ pipe=elog_writer_fd)
|
| ++ fd_pipes[elog_writer_fd] = elog_writer_fd
|
| ++ self._elog_reg_id = self.scheduler.register(elog_reader_fd,
|
| ++ self._registered_events, self._elog_output_handler)
|
| ++
|
| + pid = os.fork()
|
| + if pid != 0:
|
| ++ os.close(elog_writer_fd)
|
| ++ self._elog_reader_fd = elog_reader_fd
|
| ++ self._buf = ""
|
| ++ self._elog_keys = set()
|
| ++
|
| ++ # invalidate relevant vardbapi caches
|
| ++ if self.vartree.dbapi._categories is not None:
|
| ++ self.vartree.dbapi._categories = None
|
| ++ self.vartree.dbapi._pkgs_changed = True
|
| ++ self.vartree.dbapi._clear_pkg_cache(mylink)
|
| ++
|
| + portage.process.spawned_pids.append(pid)
|
| + return [pid]
|
| +
|
| ++ os.close(elog_reader_fd)
|
| + portage.process._setup_pipes(fd_pipes)
|
| +
|
| + # Use default signal handlers since the ones inherited
|
| +@@ -35,18 +168,32 @@ class MergeProcess(SpawnProcess):
|
| + signal.signal(signal.SIGINT, signal.SIG_DFL)
|
| + signal.signal(signal.SIGTERM, signal.SIG_DFL)
|
| +
|
| +- portage.output.havecolor = self.dblink.settings.get('NOCOLOR') \
|
| ++ portage.output.havecolor = self.settings.get('NOCOLOR') \
|
| + not in ('yes', 'true')
|
| +
|
| +- # In this subprocess we want dblink._display_merge() to use
|
| ++ # In this subprocess we want mylink._display_merge() to use
|
| + # stdout/stderr directly since they are pipes. This behavior
|
| +- # is triggered when dblink._scheduler is None.
|
| +- self.dblink._scheduler = None
|
| ++ # is triggered when mylink._scheduler is None.
|
| ++ mylink._scheduler = None
|
| ++
|
| ++ # In this subprocess we don't want PORTAGE_BACKGROUND to
|
| ++ # suppress stdout/stderr output since they are pipes. We
|
| ++ # also don't want to open PORTAGE_LOG_FILE, since it will
|
| ++ # already be opened by the parent process, so we set the
|
| ++ # "subprocess" value for use in conditional logging code
|
| ++ # involving PORTAGE_LOG_FILE.
|
| ++ if self.settings.get("PORTAGE_BACKGROUND") == "1":
|
| ++ # unmerge phases have separate logs
|
| ++ self.settings["PORTAGE_BACKGROUND_UNMERGE"] = "1"
|
| ++ self.settings.backup_changes("PORTAGE_BACKGROUND_UNMERGE")
|
| ++ self.settings["PORTAGE_BACKGROUND"] = "subprocess"
|
| ++ self.settings.backup_changes("PORTAGE_BACKGROUND")
|
| +
|
| + rval = 1
|
| + try:
|
| +- rval = self.dblink._merge_process(self.srcroot, self.destroot,
|
| +- self.cfgfiledict, self.conf_mem_file)
|
| ++ rval = mylink.merge(self.pkgloc, self.infloc,
|
| ++ myebuild=self.myebuild, mydbapi=self.mydbapi,
|
| ++ prev_mtimes=self.prev_mtimes)
|
| + except SystemExit:
|
| + raise
|
| + except:
|
| +@@ -55,3 +202,21 @@ class MergeProcess(SpawnProcess):
|
| + # Call os._exit() from finally block, in order to suppress any
|
| + # finally blocks from earlier in the call stack. See bug #345289.
|
| + os._exit(rval)
|
| ++
|
| ++ def _unregister(self):
|
| ++ """
|
| ++ Unregister from the scheduler and close open files.
|
| ++ """
|
| ++ if self._elog_reg_id is not None:
|
| ++ self.scheduler.unregister(self._elog_reg_id)
|
| ++ self._elog_reg_id = None
|
| ++ if self._elog_reader_fd:
|
| ++ os.close(self._elog_reader_fd)
|
| ++ self._elog_reader_fd = None
|
| ++ if self._elog_keys is not None:
|
| ++ for key in self._elog_keys:
|
| ++ portage.elog.elog_process(key, self.settings,
|
| ++ phasefilter=("prerm", "postrm"))
|
| ++ self._elog_keys = None
|
| ++
|
| ++ super(MergeProcess, self)._unregister()
|
| +diff --git a/pym/portage/dbapi/vartree.py b/pym/portage/dbapi/vartree.py
|
| +index 7856c85..9ceaf4c 100644
|
| +--- a/pym/portage/dbapi/vartree.py
|
| ++++ b/pym/portage/dbapi/vartree.py
|
| +@@ -13,7 +13,8 @@ portage.proxy.lazyimport.lazyimport(globals(),
|
| + 'portage.dbapi._MergeProcess:MergeProcess',
|
| + 'portage.dep:dep_getkey,isjustname,match_from_list,' + \
|
| + 'use_reduce,_slot_re',
|
| +- 'portage.elog:elog_process,_preload_elog_modules',
|
| ++ 'portage.elog:collect_ebuild_messages,collect_messages,' + \
|
| ++ 'elog_process,_merge_logentries',
|
| + 'portage.locks:lockdir,unlockdir',
|
| + 'portage.output:bold,colorize',
|
| + 'portage.package.ebuild.doebuild:doebuild_environment,' + \
|
| +@@ -54,6 +55,7 @@ from portage import _unicode_encode
|
| +
|
| + from _emerge.AsynchronousLock import AsynchronousLock
|
| + from _emerge.EbuildBuildDir import EbuildBuildDir
|
| ++from _emerge.EbuildPhase import EbuildPhase
|
| + from _emerge.PollScheduler import PollScheduler
|
| + from _emerge.MiscFunctionsProcess import MiscFunctionsProcess
|
| +
|
| +@@ -876,7 +878,7 @@ class vardbapi(dbapi):
|
| + def populate(self):
|
| + self._populate()
|
| +
|
| +- def _populate(self, scheduler=None):
|
| ++ def _populate(self):
|
| + owners_cache = vardbapi._owners_cache(self._vardb)
|
| + cached_hashes = set()
|
| + base_names = self._vardb._aux_cache["owners"]["base_names"]
|
| +@@ -900,10 +902,6 @@ class vardbapi(dbapi):
|
| +
|
| + # Cache any missing packages.
|
| + for cpv in uncached_pkgs:
|
| +-
|
| +- if scheduler is not None:
|
| +- scheduler.scheduleYield()
|
| +-
|
| + owners_cache.add(cpv)
|
| +
|
| + # Delete any stale cache.
|
| +@@ -917,12 +915,12 @@ class vardbapi(dbapi):
|
| +
|
| + return owners_cache
|
| +
|
| +- def get_owners(self, path_iter, scheduler=None):
|
| ++ def get_owners(self, path_iter):
|
| + """
|
| + @return the owners as a dblink -> set(files) mapping.
|
| + """
|
| + owners = {}
|
| +- for owner, f in self.iter_owners(path_iter, scheduler=scheduler):
|
| ++ for owner, f in self.iter_owners(path_iter):
|
| + owned_files = owners.get(owner)
|
| + if owned_files is None:
|
| + owned_files = set()
|
| +@@ -942,7 +940,7 @@ class vardbapi(dbapi):
|
| + owner_set.add(pkg_dblink)
|
| + return file_owners
|
| +
|
| +- def iter_owners(self, path_iter, scheduler=None):
|
| ++ def iter_owners(self, path_iter):
|
| + """
|
| + Iterate over tuples of (dblink, path). In order to avoid
|
| + consuming too many resources for too much time, resources
|
| +@@ -954,7 +952,7 @@ class vardbapi(dbapi):
|
| +
|
| + if not isinstance(path_iter, list):
|
| + path_iter = list(path_iter)
|
| +- owners_cache = self._populate(scheduler=scheduler)
|
| ++ owners_cache = self._populate()
|
| + vardb = self._vardb
|
| + root = vardb._eroot
|
| + hash_pkg = owners_cache._hash_pkg
|
| +@@ -1013,23 +1011,19 @@ class vardbapi(dbapi):
|
| + if dblink(cpv).isowner(path):
|
| + owners.append((cpv, path))
|
| +
|
| +- if scheduler is not None:
|
| +- scheduler.scheduleYield()
|
| +-
|
| + except StopIteration:
|
| + path_iter.append(path)
|
| + del owners[:]
|
| + dblink_cache.clear()
|
| + gc.collect()
|
| +- for x in self._iter_owners_low_mem(path_iter,
|
| +- scheduler=scheduler):
|
| ++ for x in self._iter_owners_low_mem(path_iter):
|
| + yield x
|
| + return
|
| + else:
|
| + for cpv, p in owners:
|
| + yield (dblink(cpv), p)
|
| +
|
| +- def _iter_owners_low_mem(self, path_list, scheduler=None):
|
| ++ def _iter_owners_low_mem(self, path_list):
|
| + """
|
| + This implemention will make a short-lived dblink instance (and
|
| + parse CONTENTS) for every single installed package. This is
|
| +@@ -1051,10 +1045,6 @@ class vardbapi(dbapi):
|
| +
|
| + root = self._vardb._eroot
|
| + for cpv in self._vardb.cpv_all():
|
| +-
|
| +- if scheduler is not None:
|
| +- scheduler.scheduleYield()
|
| +-
|
| + dblnk = self._vardb._dblink(cpv)
|
| +
|
| + for path, name, is_basename in path_info_list:
|
| +@@ -1195,12 +1185,8 @@ class dblink(object):
|
| + r')$'
|
| + )
|
| +
|
| +- # When looping over files for merge/unmerge, temporarily yield to the
|
| +- # scheduler each time this many files are processed.
|
| +- _file_merge_yield_interval = 20
|
| +-
|
| + def __init__(self, cat, pkg, myroot=None, settings=None, treetype=None,
|
| +- vartree=None, blockers=None, scheduler=None):
|
| ++ vartree=None, blockers=None, scheduler=None, pipe=None):
|
| + """
|
| + Creates a DBlink object for a given CPV.
|
| + The given CPV may not be present in the database already.
|
| +@@ -1259,6 +1245,7 @@ class dblink(object):
|
| + self._md5_merge_map = {}
|
| + self._hash_key = (self.myroot, self.mycpv)
|
| + self._protect_obj = None
|
| ++ self._pipe = pipe
|
| +
|
| + def __hash__(self):
|
| + return hash(self._hash_key)
|
| +@@ -1486,6 +1473,21 @@ class dblink(object):
|
| + " method is now unused.",
|
| + DeprecationWarning, stacklevel=2)
|
| +
|
| ++ background = False
|
| ++ if self._scheduler is None:
|
| ++ # We create a scheduler instance and use it to
|
| ++ # log unmerge output separately from merge output.
|
| ++ self._scheduler = PollScheduler().sched_iface
|
| ++ if self.settings.get("PORTAGE_BACKGROUND") == "subprocess":
|
| ++ if self.settings.get("PORTAGE_BACKGROUND_UNMERGE") == "1":
|
| ++ self.settings["PORTAGE_BACKGROUND"] = "1"
|
| ++ self.settings.backup_changes("PORTAGE_BACKGROUND")
|
| ++ background = True
|
| ++ else:
|
| ++ self.settings.pop("PORTAGE_BACKGROUND", None)
|
| ++ elif self.settings.get("PORTAGE_BACKGROUND") == "1":
|
| ++ background = True
|
| ++
|
| + self.vartree.dbapi._bump_mtime(self.mycpv)
|
| + showMessage = self._display_merge
|
| + if self.vartree.dbapi._categories is not None:
|
| +@@ -1503,7 +1505,7 @@ class dblink(object):
|
| + continue
|
| + others_in_slot.append(dblink(self.cat, catsplit(cur_cpv)[1],
|
| + settings=self.settings, vartree=self.vartree,
|
| +- treetype="vartree"))
|
| ++ treetype="vartree", pipe=self._pipe))
|
| +
|
| + retval = self._security_check([self] + others_in_slot)
|
| + if retval:
|
| +@@ -1546,20 +1548,18 @@ class dblink(object):
|
| + scheduler = self._scheduler
|
| + retval = os.EX_OK
|
| + try:
|
| ++ builddir_lock = EbuildBuildDir(
|
| ++ scheduler=scheduler,
|
| ++ settings=self.settings)
|
| ++ builddir_lock.lock()
|
| ++ prepare_build_dirs(settings=self.settings, cleanup=True)
|
| ++ log_path = self.settings.get("PORTAGE_LOG_FILE")
|
| + if myebuildpath:
|
| +- builddir_lock = EbuildBuildDir(
|
| +- scheduler=(scheduler or PollScheduler().sched_iface),
|
| ++ phase = EbuildPhase(background=background,
|
| ++ phase=ebuild_phase, scheduler=scheduler,
|
| + settings=self.settings)
|
| +- builddir_lock.lock()
|
| +-
|
| +- prepare_build_dirs(settings=self.settings, cleanup=True)
|
| +- log_path = self.settings.get("PORTAGE_LOG_FILE")
|
| +-
|
| +- if scheduler is None:
|
| +- retval = _spawn_phase('prerm', self.settings)
|
| +- else:
|
| +- retval = scheduler.dblinkEbuildPhase(
|
| +- self, self.vartree.dbapi, myebuildpath, ebuild_phase)
|
| ++ phase.start()
|
| ++ retval = phase.wait()
|
| +
|
| + # XXX: Decide how to handle failures here.
|
| + if retval != os.EX_OK:
|
| +@@ -1581,11 +1581,11 @@ class dblink(object):
|
| +
|
| + if myebuildpath:
|
| + ebuild_phase = "postrm"
|
| +- if scheduler is None:
|
| +- retval = _spawn_phase(ebuild_phase, self.settings)
|
| +- else:
|
| +- retval = scheduler.dblinkEbuildPhase(
|
| +- self, self.vartree.dbapi, myebuildpath, ebuild_phase)
|
| ++ phase = EbuildPhase(background=background,
|
| ++ phase=ebuild_phase, scheduler=scheduler,
|
| ++ settings=self.settings)
|
| ++ phase.start()
|
| ++ retval = phase.wait()
|
| +
|
| + # XXX: Decide how to handle failures here.
|
| + if retval != os.EX_OK:
|
| +@@ -1667,9 +1667,7 @@ class dblink(object):
|
| +
|
| + self._eerror(ebuild_phase, msg_lines)
|
| +
|
| +- # process logs created during pre/postrm
|
| +- elog_process(self.mycpv, self.settings,
|
| +- phasefilter=('prerm', 'postrm'))
|
| ++ self._elog_process(phasefilter=("prerm", "postrm"))
|
| +
|
| + if retval == os.EX_OK and builddir_lock is not None:
|
| + # myebuildpath might be None, so ensure
|
| +@@ -1679,12 +1677,11 @@ class dblink(object):
|
| + self.pkg + ".ebuild")
|
| + doebuild_environment(myebuildpath, "cleanrm",
|
| + settings=self.settings, db=self.vartree.dbapi)
|
| +- if scheduler is None:
|
| +- _spawn_phase("cleanrm", self.settings)
|
| +- else:
|
| +- scheduler.dblinkEbuildPhase(
|
| +- self, self.vartree.dbapi,
|
| +- myebuildpath, "cleanrm")
|
| ++ phase = EbuildPhase(background=background,
|
| ++ phase="cleanrm", scheduler=scheduler,
|
| ++ settings=self.settings)
|
| ++ phase.start()
|
| ++ retval = phase.wait()
|
| + finally:
|
| + if builddir_lock is not None:
|
| + builddir_lock.unlock()
|
| +@@ -1728,11 +1725,18 @@ class dblink(object):
|
| + def _display_merge(self, msg, level=0, noiselevel=0):
|
| + if not self._verbose and noiselevel >= 0 and level < logging.WARN:
|
| + return
|
| +- if self._scheduler is not None:
|
| +- self._scheduler.dblinkDisplayMerge(self, msg,
|
| +- level=level, noiselevel=noiselevel)
|
| +- return
|
| +- writemsg_level(msg, level=level, noiselevel=noiselevel)
|
| ++ if self._scheduler is None:
|
| ++ writemsg_level(msg, level=level, noiselevel=noiselevel)
|
| ++ else:
|
| ++ log_path = self.settings.get("PORTAGE_LOG_FILE")
|
| ++ background = self.settings.get("PORTAGE_BACKGROUND") == "1"
|
| ++
|
| ++ if log_path is None:
|
| ++ if not (background and level < logging.WARN):
|
| ++ writemsg_level(msg, level=level, noiselevel=noiselevel)
|
| ++ else:
|
| ++ self._scheduler.output(msg,
|
| ++ background=background, log_path=log_path)
|
| +
|
| + def _unmerge_pkgfiles(self, pkgfiles, others_in_slot):
|
| + """
|
| +@@ -1750,7 +1754,6 @@ class dblink(object):
|
| + os = _os_merge
|
| + perf_md5 = perform_md5
|
| + showMessage = self._display_merge
|
| +- scheduler = self._scheduler
|
| +
|
| + if not pkgfiles:
|
| + showMessage(_("No package files given... Grabbing a set.\n"))
|
| +@@ -1766,7 +1769,7 @@ class dblink(object):
|
| + continue
|
| + others_in_slot.append(dblink(self.cat, catsplit(cur_cpv)[1],
|
| + settings=self.settings,
|
| +- vartree=self.vartree, treetype="vartree"))
|
| ++ vartree=self.vartree, treetype="vartree", pipe=self._pipe))
|
| +
|
| + dest_root = self._eroot
|
| + dest_root_len = len(dest_root) - 1
|
| +@@ -1851,10 +1854,6 @@ class dblink(object):
|
| +
|
| + for i, objkey in enumerate(mykeys):
|
| +
|
| +- if scheduler is not None and \
|
| +- 0 == i % self._file_merge_yield_interval:
|
| +- scheduler.scheduleYield()
|
| +-
|
| + obj = normalize_path(objkey)
|
| + if os is _os_merge:
|
| + try:
|
| +@@ -2575,7 +2574,6 @@ class dblink(object):
|
| + plib_collisions = {}
|
| +
|
| + showMessage = self._display_merge
|
| +- scheduler = self._scheduler
|
| + stopmerge = False
|
| + collisions = []
|
| + destroot = self.settings['ROOT']
|
| +@@ -2585,10 +2583,6 @@ class dblink(object):
|
| + if i % 1000 == 0 and i != 0:
|
| + showMessage(_("%d files checked ...\n") % i)
|
| +
|
| +- if scheduler is not None and \
|
| +- 0 == i % self._file_merge_yield_interval:
|
| +- scheduler.scheduleYield()
|
| +-
|
| + dest_path = normalize_path(
|
| + os.path.join(destroot, f.lstrip(os.path.sep)))
|
| + try:
|
| +@@ -2697,7 +2691,6 @@ class dblink(object):
|
| + os = _os_merge
|
| +
|
| + showMessage = self._display_merge
|
| +- scheduler = self._scheduler
|
| +
|
| + file_paths = set()
|
| + for dblnk in installed_instances:
|
| +@@ -2706,10 +2699,6 @@ class dblink(object):
|
| + real_paths = set()
|
| + for i, path in enumerate(file_paths):
|
| +
|
| +- if scheduler is not None and \
|
| +- 0 == i % self._file_merge_yield_interval:
|
| +- scheduler.scheduleYield()
|
| +-
|
| + if os is _os_merge:
|
| + try:
|
| + _unicode_encode(path,
|
| +@@ -2769,36 +2758,56 @@ class dblink(object):
|
| + return 1
|
| +
|
| + def _eqawarn(self, phase, lines):
|
| +- from portage.elog.messages import eqawarn as _eqawarn
|
| +- if self._scheduler is None:
|
| +- for l in lines:
|
| +- _eqawarn(l, phase=phase, key=self.settings.mycpv)
|
| +- else:
|
| +- self._scheduler.dblinkElog(self,
|
| +- phase, _eqawarn, lines)
|
| ++ self._elog("eqawarn", phase, lines)
|
| +
|
| + def _eerror(self, phase, lines):
|
| +- from portage.elog.messages import eerror as _eerror
|
| ++ self._elog("eerror", phase, lines)
|
| ++
|
| ++ def _elog(self, funcname, phase, lines):
|
| ++ func = getattr(portage.elog.messages, funcname)
|
| + if self._scheduler is None:
|
| + for l in lines:
|
| +- _eerror(l, phase=phase, key=self.settings.mycpv)
|
| ++ func(l, phase=phase, key=self.mycpv)
|
| + else:
|
| +- self._scheduler.dblinkElog(self,
|
| +- phase, _eerror, lines)
|
| +-
|
| +- def _elog_subprocess(self, funcname, phase, lines):
|
| +- """
|
| +- Subprocesses call this in order to create elog messages in
|
| +- $T, for collection by the main process.
|
| +- """
|
| +- cmd = "source %s/isolated-functions.sh ; " % \
|
| +- portage._shell_quote(self.settings["PORTAGE_BIN_PATH"])
|
| +- for line in lines:
|
| +- cmd += "%s %s ; " % (funcname, portage._shell_quote(line))
|
| +- env = self.settings.environ()
|
| +- env['EBUILD_PHASE'] = phase
|
| +- subprocess.call([portage.const.BASH_BINARY, "-c", cmd],
|
| +- env=env)
|
| ++ background = self.settings.get("PORTAGE_BACKGROUND") == "1"
|
| ++ log_path = None
|
| ++ if self.settings.get("PORTAGE_BACKGROUND") != "subprocess":
|
| ++ log_path = self.settings.get("PORTAGE_LOG_FILE")
|
| ++ out = portage.StringIO()
|
| ++ for line in lines:
|
| ++ func(line, phase=phase, key=self.mycpv, out=out)
|
| ++ msg = out.getvalue()
|
| ++ self._scheduler.output(msg,
|
| ++ background=background, log_path=log_path)
|
| ++
|
| ++ def _elog_process(self, phasefilter=None):
|
| ++ cpv = self.mycpv
|
| ++ if self._pipe is None:
|
| ++ elog_process(cpv, self.settings, phasefilter=phasefilter)
|
| ++ else:
|
| ++ logdir = os.path.join(self.settings["T"], "logging")
|
| ++ ebuild_logentries = collect_ebuild_messages(logdir)
|
| ++ py_logentries = collect_messages(key=cpv).get(cpv, {})
|
| ++ logentries = _merge_logentries(py_logentries, ebuild_logentries)
|
| ++ funcnames = {
|
| ++ "INFO": "einfo",
|
| ++ "LOG": "elog",
|
| ++ "WARN": "ewarn",
|
| ++ "QA": "eqawarn",
|
| ++ "ERROR": "eerror"
|
| ++ }
|
| ++ str_buffer = []
|
| ++ for phase, messages in logentries.items():
|
| ++ for key, lines in messages:
|
| ++ funcname = funcnames[key]
|
| ++ if isinstance(lines, basestring):
|
| ++ lines = [lines]
|
| ++ for line in lines:
|
| ++ fields = (funcname, phase, cpv, line.rstrip('\n'))
|
| ++ str_buffer.append(' '.join(fields))
|
| ++ str_buffer.append('\n')
|
| ++ if str_buffer:
|
| ++ os.write(self._pipe, _unicode_encode(''.join(str_buffer)))
|
| +
|
| + def treewalk(self, srcroot, destroot, inforoot, myebuild, cleanup=0,
|
| + mydbapi=None, prev_mtimes=None):
|
| +@@ -2813,7 +2822,6 @@ class dblink(object):
|
| + unmerges old version (if required)
|
| + calls doebuild(mydo=pkg_postinst)
|
| + calls env_update
|
| +- calls elog_process
|
| +
|
| + @param srcroot: Typically this is ${D}
|
| + @type srcroot: String (Path)
|
| +@@ -2923,7 +2931,7 @@ class dblink(object):
|
| + others_in_slot.append(dblink(self.cat, catsplit(cur_cpv)[1],
|
| + settings=config(clone=self.settings),
|
| + vartree=self.vartree, treetype="vartree",
|
| +- scheduler=self._scheduler))
|
| ++ scheduler=self._scheduler, pipe=self._pipe))
|
| +
|
| + retval = self._security_check(others_in_slot)
|
| + if retval:
|
| +@@ -3069,11 +3077,7 @@ class dblink(object):
|
| + return 1
|
| +
|
| + # check for package collisions
|
| +- blockers = None
|
| +- if self._blockers is not None:
|
| +- # This is only supposed to be called when
|
| +- # the vdb is locked, like it is here.
|
| +- blockers = self._blockers()
|
| ++ blockers = self._blockers
|
| + if blockers is None:
|
| + blockers = []
|
| + collisions, plib_collisions = \
|
| +@@ -3148,8 +3152,7 @@ class dblink(object):
|
| + # get_owners is slow for large numbers of files, so
|
| + # don't look them all up.
|
| + collisions = collisions[:20]
|
| +- owners = self.vartree.dbapi._owners.get_owners(collisions,
|
| +- scheduler=self._scheduler)
|
| ++ owners = self.vartree.dbapi._owners.get_owners(collisions)
|
| + self.vartree.dbapi.flush_cache()
|
| +
|
| + for pkg, owned_files in owners.items():
|
| +@@ -3244,16 +3247,8 @@ class dblink(object):
|
| + cfgfiledict["IGNORE"] = 1
|
| + break
|
| +
|
| +- merge_task = MergeProcess(
|
| +- background=(self.settings.get('PORTAGE_BACKGROUND') == '1'),
|
| +- cfgfiledict=cfgfiledict, conf_mem_file=conf_mem_file, dblink=self,
|
| +- destroot=destroot,
|
| +- logfile=self.settings.get('PORTAGE_LOG_FILE'),
|
| +- scheduler=(scheduler or PollScheduler().sched_iface),
|
| +- srcroot=srcroot)
|
| +-
|
| +- merge_task.start()
|
| +- rval = merge_task.wait()
|
| ++ rval = self._merge_contents(srcroot, destroot, cfgfiledict,
|
| ++ conf_mem_file)
|
| + if rval != os.EX_OK:
|
| + return rval
|
| +
|
| +@@ -3454,7 +3449,7 @@ class dblink(object):
|
| +
|
| + return backup_p
|
| +
|
| +- def _merge_process(self, srcroot, destroot, cfgfiledict, conf_mem_file):
|
| ++ def _merge_contents(self, srcroot, destroot, cfgfiledict, conf_mem_file):
|
| +
|
| + cfgfiledict_orig = cfgfiledict.copy()
|
| +
|
| +@@ -3683,7 +3678,7 @@ class dblink(object):
|
| + msg.append(_("This file will be renamed to a different name:"))
|
| + msg.append(" '%s'" % backup_dest)
|
| + msg.append("")
|
| +- self._elog_subprocess("eerror", "preinst", msg)
|
| ++ self._eerror("preinst", msg)
|
| + if movefile(mydest, backup_dest,
|
| + mysettings=self.settings,
|
| + encoding=_encodings['merge']) is None:
|
| +@@ -3761,7 +3756,7 @@ class dblink(object):
|
| + msg.append(_("This file will be merged with a different name:"))
|
| + msg.append(" '%s'" % newdest)
|
| + msg.append("")
|
| +- self._elog_subprocess("eerror", "preinst", msg)
|
| ++ self._eerror("preinst", msg)
|
| + mydest = newdest
|
| +
|
| + elif stat.S_ISREG(mydmode) or (stat.S_ISLNK(mydmode) and os.path.exists(mydest) and stat.S_ISREG(os.stat(mydest)[stat.ST_MODE])):
|
| +@@ -3845,65 +3840,6 @@ class dblink(object):
|
| + def merge(self, mergeroot, inforoot, myroot=None, myebuild=None, cleanup=0,
|
| + mydbapi=None, prev_mtimes=None):
|
| + """
|
| +- If portage is reinstalling itself, create temporary
|
| +- copies of PORTAGE_BIN_PATH and PORTAGE_PYM_PATH in order
|
| +- to avoid relying on the new versions which may be
|
| +- incompatible. Register an atexit hook to clean up the
|
| +- temporary directories. Pre-load elog modules here since
|
| +- we won't be able to later if they get unmerged (happens
|
| +- when namespace changes).
|
| +-
|
| +- @param myroot: ignored, self._eroot is used instead
|
| +- """
|
| +- myroot = None
|
| +- if self.vartree.dbapi._categories is not None:
|
| +- self.vartree.dbapi._categories = None
|
| +- reinstall_self = False
|
| +- if self.myroot == "/" and \
|
| +- match_from_list(PORTAGE_PACKAGE_ATOM, [self.mycpv]):
|
| +- inherited = frozenset(self.settings.get('INHERITED', '').split())
|
| +- if not self.vartree.dbapi.cpv_exists(self.mycpv) or \
|
| +- '9999' in self.mycpv or \
|
| +- 'git' in inherited or \
|
| +- 'git-2' in inherited:
|
| +- reinstall_self = True
|
| +-
|
| +- if reinstall_self:
|
| +- # Load lazily referenced portage submodules into memory,
|
| +- # so imports won't fail during portage upgrade/downgrade.
|
| +- portage.proxy.lazyimport._preload_portage_submodules()
|
| +- settings = self.settings
|
| +-
|
| +- # Make the temp directory inside $PORTAGE_TMPDIR/portage, since
|
| +- # it's common for /tmp and /var/tmp to be mounted with the
|
| +- # "noexec" option (see bug #346899).
|
| +- build_prefix = os.path.join(settings["PORTAGE_TMPDIR"], "portage")
|
| +- ensure_dirs(build_prefix)
|
| +- base_path_tmp = tempfile.mkdtemp(
|
| +- "", "._portage_reinstall_.", build_prefix)
|
| +- portage.process.atexit_register(shutil.rmtree, base_path_tmp)
|
| +- dir_perms = 0o755
|
| +- for subdir in "bin", "pym":
|
| +- var_name = "PORTAGE_%s_PATH" % subdir.upper()
|
| +- var_orig = settings[var_name]
|
| +- var_new = os.path.join(base_path_tmp, subdir)
|
| +- settings[var_name] = var_new
|
| +- settings.backup_changes(var_name)
|
| +- shutil.copytree(var_orig, var_new, symlinks=True)
|
| +- os.chmod(var_new, dir_perms)
|
| +- portage._bin_path = settings['PORTAGE_BIN_PATH']
|
| +- portage._pym_path = settings['PORTAGE_PYM_PATH']
|
| +- os.chmod(base_path_tmp, dir_perms)
|
| +- # This serves so pre-load the modules.
|
| +- _preload_elog_modules(self.settings)
|
| +-
|
| +- return self._merge(mergeroot, inforoot,
|
| +- myebuild=myebuild, cleanup=cleanup,
|
| +- mydbapi=mydbapi, prev_mtimes=prev_mtimes)
|
| +-
|
| +- def _merge(self, mergeroot, inforoot, myroot=None, myebuild=None, cleanup=0,
|
| +- mydbapi=None, prev_mtimes=None):
|
| +- """
|
| + @param myroot: ignored, self._eroot is used instead
|
| + """
|
| + myroot = None
|
| +@@ -3945,7 +3881,7 @@ class dblink(object):
|
| + self._scheduler.dblinkEbuildPhase(
|
| + self, mydbapi, myebuild, phase)
|
| +
|
| +- elog_process(self.mycpv, self.settings)
|
| ++ self._elog_process()
|
| +
|
| + if 'noclean' not in self.settings.features and \
|
| + (retval == os.EX_OK or \
|
| +@@ -4045,10 +3981,17 @@ def merge(mycat, mypkg, pkgloc, infloc,
|
| + writemsg(_("Permission denied: access('%s', W_OK)\n") % settings['EROOT'],
|
| + noiselevel=-1)
|
| + return errno.EACCES
|
| +- mylink = dblink(mycat, mypkg, settings=settings, treetype=mytree,
|
| +- vartree=vartree, blockers=blockers, scheduler=scheduler)
|
| +- return mylink.merge(pkgloc, infloc, myebuild=myebuild,
|
| +- mydbapi=mydbapi, prev_mtimes=prev_mtimes)
|
| ++ background = (settings.get('PORTAGE_BACKGROUND') == '1')
|
| ++ merge_task = MergeProcess(
|
| ++ dblink=dblink, mycat=mycat, mypkg=mypkg, settings=settings,
|
| ++ treetype=mytree, vartree=vartree,
|
| ++ scheduler=(scheduler or PollScheduler().sched_iface),
|
| ++ background=background, blockers=blockers, pkgloc=pkgloc,
|
| ++ infloc=infloc, myebuild=myebuild, mydbapi=mydbapi,
|
| ++ prev_mtimes=prev_mtimes, logfile=settings.get('PORTAGE_LOG_FILE'))
|
| ++ merge_task.start()
|
| ++ retcode = merge_task.wait()
|
| ++ return retcode
|
| +
|
| + def unmerge(cat, pkg, myroot=None, settings=None,
|
| + mytrimworld=None, vartree=None,
|
| +diff --git a/pym/portage/package/ebuild/_config/special_env_vars.py b/pym/portage/package/ebuild/_config/special_env_vars.py
|
| +index b807146..28f6612 100644
|
| +--- a/pym/portage/package/ebuild/_config/special_env_vars.py
|
| ++++ b/pym/portage/package/ebuild/_config/special_env_vars.py
|
| +@@ -141,7 +141,7 @@ environ_filter += [
|
| + "FETCHCOMMAND_HTTP", "FETCHCOMMAND_HTTPS",
|
| + "FETCHCOMMAND_RSYNC", "FETCHCOMMAND_SFTP",
|
| + "GENTOO_MIRRORS", "NOCONFMEM", "O",
|
| +- "PORTAGE_BACKGROUND",
|
| ++ "PORTAGE_BACKGROUND", "PORTAGE_BACKGROUND_UNMERGE",
|
| + "PORTAGE_BINHOST_CHUNKSIZE", "PORTAGE_BUILDIR_LOCKED", "PORTAGE_CALLER",
|
| + "PORTAGE_ELOG_CLASSES",
|
| + "PORTAGE_ELOG_MAILFROM", "PORTAGE_ELOG_MAILSUBJECT",
|
|
|