OLD | NEW |
1 #!/usr/bin/python2.6 | 1 #!/usr/bin/python2.6 |
2 # Copyright (c) 2010 The Chromium OS Authors. All rights reserved. | 2 # Copyright (c) 2010 The Chromium OS Authors. All rights reserved. |
3 # Use of this source code is governed by a BSD-style license that can be | 3 # Use of this source code is governed by a BSD-style license that can be |
4 # found in the LICENSE file. | 4 # found in the LICENSE file. |
5 | 5 |
6 """Program to run emerge in parallel, for significant speedup. | 6 """Program to run emerge in parallel, for significant speedup. |
7 | 7 |
8 Usage: | 8 Usage: |
9 ./parallel_emerge [--board=BOARD] [--workon=PKGS] [--no-workon-deps] | 9 ./parallel_emerge [--board=BOARD] [--workon=PKGS] [--no-workon-deps] |
10 [emerge args] package | 10 [emerge args] package |
(...skipping 20 matching lines...) |
31 or "--nodeps" write only access. | 31 or "--nodeps" write only access. |
32 Caveats: | 32 Caveats: |
33 * Some ebuild packages have incorrectly specified deps, and running | 33 * Some ebuild packages have incorrectly specified deps, and running |
34 them in parallel is more likely to bring out these failures. | 34 them in parallel is more likely to bring out these failures. |
35 * Some ebuilds (especially the build part) have complex dependencies | 35 * Some ebuilds (especially the build part) have complex dependencies |
36 that are not captured well by this script (it may be necessary to | 36 that are not captured well by this script (it may be necessary to |
37 install an old package to build, but then install a newer version | 37 install an old package to build, but then install a newer version |
38 of the same package for a runtime dep). | 38 of the same package for a runtime dep). |
39 """ | 39 """ |
40 | 40 |
| 41 import codecs |
41 import copy | 42 import copy |
42 import multiprocessing | 43 import multiprocessing |
43 import os | 44 import os |
44 import Queue | 45 import Queue |
45 import shlex | 46 import shlex |
| 47 import signal |
46 import sys | 48 import sys |
47 import tempfile | 49 import tempfile |
48 import time | 50 import time |
| 51 import traceback |
49 import urllib2 | 52 import urllib2 |
50 | 53 |
51 # If PORTAGE_USERNAME isn't specified, scrape it from the $HOME variable. On | 54 # If PORTAGE_USERNAME isn't specified, scrape it from the $HOME variable. On |
52 # Chromium OS, the default "portage" user doesn't have the necessary | 55 # Chromium OS, the default "portage" user doesn't have the necessary |
53 # permissions. It'd be easier if we could default to $USERNAME, but $USERNAME | 56 # permissions. It'd be easier if we could default to $USERNAME, but $USERNAME |
54 # is "root" here because we get called through sudo. | 57 # is "root" here because we get called through sudo. |
55 # | 58 # |
56 # We need to set this before importing any portage modules, because portage | 59 # We need to set this before importing any portage modules, because portage |
57 # looks up "PORTAGE_USERNAME" at import time. | 60 # looks up "PORTAGE_USERNAME" at import time. |
58 # | 61 # |
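The fallback itself lives in lines elided from this diff; purely as an illustrative sketch of the idea described in the comment above (assuming the username is simply the basename of $HOME, which may not match the real code):

    # Illustrative sketch only; the real fallback is in code elided from
    # this diff.
    import os

    if "PORTAGE_USERNAME" not in os.environ:
        homedir = os.environ.get("HOME")
        if homedir:
            # Assume the user name is the last path component of $HOME,
            # e.g. /home/chrome-bot -> chrome-bot.
            os.environ["PORTAGE_USERNAME"] = os.path.basename(homedir)

    # Portage reads PORTAGE_USERNAME when it is imported, so this must run
    # before the first portage import.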
(...skipping 148 matching lines...) |
207 Typical usage: | 210 Typical usage: |
208 deps = DepGraphGenerator() | 211 deps = DepGraphGenerator() |
209 deps.Initialize(sys.argv[1:]) | 212 deps.Initialize(sys.argv[1:]) |
210 deps_tree, deps_info = deps.GenDependencyTree() | 213 deps_tree, deps_info = deps.GenDependencyTree() |
211 deps_graph = deps.GenDependencyGraph(deps_tree, deps_info) | 214 deps_graph = deps.GenDependencyGraph(deps_tree, deps_info) |
212 deps.PrintTree(deps_tree) | 215 deps.PrintTree(deps_tree) |
213 PrintDepsMap(deps_graph) | 216 PrintDepsMap(deps_graph) |
214 """ | 217 """ |
215 | 218 |
216 __slots__ = ["board", "emerge", "mandatory_source", "no_workon_deps", | 219 __slots__ = ["board", "emerge", "mandatory_source", "no_workon_deps", |
217 "package_db", "rebuild"] | 220 "package_db", "rebuild", "show_output"] |
218 | 221 |
219 def __init__(self): | 222 def __init__(self): |
220 self.board = None | 223 self.board = None |
221 self.emerge = EmergeData() | 224 self.emerge = EmergeData() |
222 self.mandatory_source = set() | 225 self.mandatory_source = set() |
223 self.no_workon_deps = False | 226 self.no_workon_deps = False |
224 self.package_db = {} | 227 self.package_db = {} |
225 self.rebuild = False | 228 self.rebuild = False |
| 229 self.show_output = False |
226 | 230 |
227 def ParseParallelEmergeArgs(self, argv): | 231 def ParseParallelEmergeArgs(self, argv): |
228 """Read the parallel emerge arguments from the command-line. | 232 """Read the parallel emerge arguments from the command-line. |
229 | 233 |
230 We need to be compatible with emerge arg format. We scrape arguments that | 234 We need to be compatible with emerge arg format. We scrape arguments that |
231 are specific to parallel_emerge, and pass through the rest directly to | 235 are specific to parallel_emerge, and pass through the rest directly to |
232 emerge. | 236 emerge. |
233 Args: | 237 Args: |
234 argv: arguments list | 238 argv: arguments list |
235 Returns: | 239 Returns: |
236 Arguments that don't belong to parallel_emerge | 240 Arguments that don't belong to parallel_emerge |
237 """ | 241 """ |
238 emerge_args = [] | 242 emerge_args = [] |
239 for arg in argv: | 243 for arg in argv: |
240 # Handle the arguments that are specific to parallel_emerge, and pass | 244 # Handle the arguments that are specific to parallel_emerge, and pass |
241 # the rest through to emerge. | 245 # the rest through to emerge. |
242 if arg.startswith("--board="): | 246 if arg.startswith("--board="): |
243 self.board = arg.replace("--board=", "") | 247 self.board = arg.replace("--board=", "") |
244 elif arg.startswith("--workon="): | 248 elif arg.startswith("--workon="): |
245 workon_str = arg.replace("--workon=", "") | 249 workon_str = arg.replace("--workon=", "") |
246 package_list = shlex.split(" ".join(shlex.split(workon_str))) | 250 package_list = shlex.split(" ".join(shlex.split(workon_str))) |
247 self.mandatory_source.update(package_list) | 251 self.mandatory_source.update(package_list) |
248 elif arg == "--no-workon-deps": | 252 elif arg == "--no-workon-deps": |
249 self.no_workon_deps = True | 253 self.no_workon_deps = True |
250 elif arg == "--rebuild": | 254 elif arg == "--rebuild": |
251 self.rebuild = True | 255 self.rebuild = True |
| 256 elif arg == "--show-output": |
| 257 self.show_output = True |
252 else: | 258 else: |
253 # Not one of our options, so pass through to emerge. | 259 # Not one of our options, so pass through to emerge. |
254 emerge_args.append(arg) | 260 emerge_args.append(arg) |
255 | 261 |
256 if self.rebuild: | 262 if self.rebuild: |
257 if self.no_workon_deps: | 263 if self.no_workon_deps: |
258 print "--rebuild is not compatible with --no-workon-deps" | 264 print "--rebuild is not compatible with --no-workon-deps" |
259 sys.exit(1) | 265 sys.exit(1) |
260 | 266 |
261 return emerge_args | 267 return emerge_args |
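As an illustration of the split performed above (board and package names are hypothetical), a call like the one below leaves only plain emerge arguments in the return value. Note that --workon tolerates an extra level of shell quoting: the double shlex.split flattens a value such as '"pkg1 pkg2"' into individual package names.

    # Hypothetical illustration of how ParseParallelEmergeArgs splits argv.
    deps = DepGraphGenerator()
    emerge_args = deps.ParseParallelEmergeArgs(
        ["--board=x86-generic", "--workon=chromeos-base/power_manager",
         "--show-output", "--jobs=8", "chromeos-base/chromeos"])

    # After the call:
    #   deps.board            == "x86-generic"
    #   deps.mandatory_source == set(["chromeos-base/power_manager"])
    #   deps.show_output      == True
    #   emerge_args           == ["--jobs=8", "chromeos-base/chromeos"]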
(...skipping 798 matching lines...) |
1060 """Print dependency graph, for each package list it's prerequisites.""" | 1066 """Print dependency graph, for each package list it's prerequisites.""" |
1061 for i in deps_map: | 1067 for i in deps_map: |
1062 print "%s: (%s) needs" % (i, deps_map[i]["action"]) | 1068 print "%s: (%s) needs" % (i, deps_map[i]["action"]) |
1063 needs = deps_map[i]["needs"] | 1069 needs = deps_map[i]["needs"] |
1064 for j in needs: | 1070 for j in needs: |
1065 print " %s" % (j) | 1071 print " %s" % (j) |
1066 if not needs: | 1072 if not needs: |
1067 print " no dependencies" | 1073 print " no dependencies" |
1068 | 1074 |
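For reference, the deps_map walked here (and consumed by EmergeQueue below) is a plain dict keyed by package id. Its construction is in code elided from this diff, so the sketch below is inferred only from how it is accessed in this file; the package names and exact value types are assumptions.

    # Rough shape of deps_map as used by PrintDepsMap, _Schedule and _Finish.
    deps_map = {
        "chromeos-base/chromeos-0.0.1-r60": {
            "action": "merge",  # or "nomerge"
            # Packages this target is still waiting on; emptied as they finish.
            "needs": {"sys-apps/dbus-1.2.24-r2": "merge"},
            # Reverse edges: packages that are waiting on this target.
            "provides": set(),
        },
        "sys-apps/dbus-1.2.24-r2": {
            "action": "merge",
            "needs": {},
            "provides": set(["chromeos-base/chromeos-0.0.1-r60"]),
        },
    }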
1069 | 1075 |
1070 def EmergeWorker(task_queue, done_queue, emerge, package_db): | 1076 class EmergeJobState(object): |
| 1077 __slots__ = ["done", "filename", "last_output_seek", "last_output_timestamp", |
| 1078 "pkgname", "retcode", "start_timestamp", "target"] |
| 1079 |
| 1080 def __init__(self, target, pkgname, done, filename, start_timestamp, |
| 1081 retcode=None): |
| 1082 |
| 1083 # The full name of the target we're building (e.g. |
| 1084 # chromeos-base/chromeos-0.0.1-r60) |
| 1085 self.target = target |
| 1086 |
| 1087 # The short name of the target we're building (e.g. chromeos-0.0.1-r60) |
| 1088 self.pkgname = pkgname |
| 1089 |
| 1090 # Whether the job is done. (True if the job is done; false otherwise.) |
| 1091 self.done = done |
| 1092 |
| 1093 # The filename where output is currently stored. |
| 1094 self.filename = filename |
| 1095 |
| 1096 # The location (in bytes) of the end of the last complete line we printed. |
| 1097 # This starts off at zero. We use this to jump to the right place when we |
| 1098 # print output from the same ebuild multiple times. |
| 1099 self.last_output_seek = 0 |
| 1100 |
| 1101 # The timestamp of the last time we printed output. Since we haven't |
| 1102 # printed output yet, this starts at zero. |
| 1103 self.last_output_timestamp = 0 |
| 1104 |
| 1105 # The return code of our job, if the job is actually finished. |
| 1106 self.retcode = retcode |
| 1107 |
| 1108 # The timestamp when our job started. |
| 1109 self.start_timestamp = start_timestamp |
| 1110 |
| 1111 |
| 1112 def EmergeWorker(task_queue, job_queue, emerge, package_db): |
1071 """This worker emerges any packages given to it on the task_queue. | 1113 """This worker emerges any packages given to it on the task_queue. |
1072 | 1114 |
1073 Args: | 1115 Args: |
1074 task_queue: The queue of tasks for this worker to do. | 1116 task_queue: The queue of tasks for this worker to do. |
1075 done_queue: The queue of results from the worker. | 1117 job_queue: The queue of results from the worker. |
1076 emerge: An EmergeData() object. | 1118 emerge: An EmergeData() object. |
1077 package_db: A dict, mapping package ids to portage Package objects. | 1119 package_db: A dict, mapping package ids to portage Package objects. |
1078 | 1120 |
1079 It expects package identifiers to be passed to it via task_queue. When | 1121 It expects package identifiers to be passed to it via task_queue. When |
1080 the package is merged, it pushes (target, retval, outputstr) into the | 1122 a merge starts or finishes, it pushes an EmergeJobState object onto the |
1081 done_queue. | 1123 job_queue. The output of each job is stored in the file named by the |
| 1124 state's filename attribute. |
1082 """ | 1125 """ |
1083 | 1126 |
| 1127 def ExitHandler(signum, frame): |
| 1128 # Remove our signal handlers so we don't get called recursively. |
| 1129 signal.signal(signal.SIGINT, signal.SIG_DFL) |
| 1130 signal.signal(signal.SIGTERM, signal.SIG_DFL) |
| 1131 |
| 1132 # Try to exit cleanly |
| 1133 sys.exit(1) |
| 1134 |
| 1135 # Ensure that we exit quietly and cleanly, if possible, when we receive |
| 1136 # SIGTERM or SIGINT signals. By default, when the user hits CTRL-C, all |
| 1137 # of the child processes will print details about KeyboardInterrupt |
| 1138 # exceptions, which isn't very helpful. |
| 1139 signal.signal(signal.SIGINT, ExitHandler) |
| 1140 signal.signal(signal.SIGTERM, ExitHandler) |
| 1141 |
1084 settings, trees, mtimedb = emerge.settings, emerge.trees, emerge.mtimedb | 1142 settings, trees, mtimedb = emerge.settings, emerge.trees, emerge.mtimedb |
1085 opts, spinner = emerge.opts, emerge.spinner | 1143 opts, spinner = emerge.opts, emerge.spinner |
1086 opts["--nodeps"] = True | 1144 opts["--nodeps"] = True |
1087 while True: | 1145 while True: |
1088 # Wait for a new item to show up on the queue. This is a blocking wait, | 1146 # Wait for a new item to show up on the queue. This is a blocking wait, |
1089 # so if there's nothing to do, we just sit here. | 1147 # so if there's nothing to do, we just sit here. |
1090 target = task_queue.get() | 1148 target = task_queue.get() |
1091 print "Emerging", target | |
1092 db_pkg = package_db[target] | 1149 db_pkg = package_db[target] |
1093 db_pkg.root_config = emerge.root_config | 1150 db_pkg.root_config = emerge.root_config |
1094 install_list = [db_pkg] | 1151 install_list = [db_pkg] |
1095 output = tempfile.TemporaryFile() | 1152 pkgname = db_pkg.pf |
1096 outputstr = "" | 1153 output = tempfile.NamedTemporaryFile(prefix=pkgname + "-", delete=False) |
| 1154 start_timestamp = time.time() |
| 1155 job = EmergeJobState(target, pkgname, False, output.name, start_timestamp) |
| 1156 job_queue.put(job) |
1097 if "--pretend" in opts: | 1157 if "--pretend" in opts: |
1098 retval = 0 | 1158 retcode = 0 |
1099 else: | 1159 else: |
1100 save_stdout = sys.stdout | 1160 save_stdout = sys.stdout |
1101 save_stderr = sys.stderr | 1161 save_stderr = sys.stderr |
1102 try: | 1162 try: |
1103 sys.stdout = output | 1163 sys.stdout = output |
1104 sys.stderr = output | 1164 sys.stderr = output |
1105 scheduler = Scheduler(settings, trees, mtimedb, opts, spinner, | 1165 scheduler = Scheduler(settings, trees, mtimedb, opts, spinner, |
1106 install_list, [], emerge.scheduler_graph) | 1166 install_list, [], emerge.scheduler_graph) |
1107 retval = scheduler.merge() | 1167 retcode = scheduler.merge() |
| 1168 except Exception: |
| 1169 traceback.print_exc(file=output) |
| 1170 retcode = 1 |
1108 finally: | 1171 finally: |
1109 sys.stdout = save_stdout | 1172 sys.stdout = save_stdout |
1110 sys.stderr = save_stderr | 1173 sys.stderr = save_stderr |
1111 if retval is None: | 1174 output.close() |
1112 retval = 0 | 1175 if retcode is None: |
1113 if retval != 0: | 1176 retcode = 0 |
1114 output.seek(0) | |
1115 outputstr = output.read() | |
1116 | 1177 |
1117 done_queue.put((target, retval, outputstr)) | 1178 job = EmergeJobState(target, pkgname, True, output.name, start_timestamp, |
| 1179 retcode) |
| 1180 job_queue.put(job) |
1118 | 1181 |
1119 | 1182 |
1120 class EmergeQueue(object): | 1183 class EmergeQueue(object): |
1121 """Class to schedule emerge jobs according to a dependency graph.""" | 1184 """Class to schedule emerge jobs according to a dependency graph.""" |
1122 | 1185 |
1123 def __init__(self, deps_map, emerge, package_db): | 1186 def __init__(self, deps_map, emerge, package_db, show_output): |
1124 # Store the dependency graph. | 1187 # Store the dependency graph. |
1125 self._deps_map = deps_map | 1188 self._deps_map = deps_map |
1126 # Initialize the running queue to empty | 1189 # Initialize the running queue to empty |
1127 self._jobs = set() | 1190 self._jobs = {} |
1128 # List of total package installs represented in deps_map. | 1191 # List of total package installs represented in deps_map. |
1129 install_jobs = [x for x in deps_map if deps_map[x]["action"] == "merge"] | 1192 install_jobs = [x for x in deps_map if deps_map[x]["action"] == "merge"] |
1130 self._total_jobs = len(install_jobs) | 1193 self._total_jobs = len(install_jobs) |
| 1194 self._show_output = show_output |
1131 | 1195 |
1132 if "--pretend" in emerge.opts: | 1196 if "--pretend" in emerge.opts: |
1133 print "Skipping merge because of --pretend mode." | 1197 print "Skipping merge because of --pretend mode." |
1134 sys.exit(0) | 1198 sys.exit(0) |
1135 | 1199 |
1136 # Setup scheduler graph object. This is used by the child processes | 1200 # Setup scheduler graph object. This is used by the child processes |
1137 # to help schedule jobs. | 1201 # to help schedule jobs. |
1138 emerge.scheduler_graph = emerge.depgraph.schedulerGraph() | 1202 emerge.scheduler_graph = emerge.depgraph.schedulerGraph() |
1139 | 1203 |
1140 procs = min(self._total_jobs, | 1204 procs = min(self._total_jobs, |
1141 emerge.opts.get("--jobs", multiprocessing.cpu_count())) | 1205 emerge.opts.get("--jobs", multiprocessing.cpu_count())) |
1142 self._emerge_queue = multiprocessing.Queue() | 1206 self._emerge_queue = multiprocessing.Queue() |
1143 self._done_queue = multiprocessing.Queue() | 1207 self._job_queue = multiprocessing.Queue() |
1144 args = (self._emerge_queue, self._done_queue, emerge, package_db) | 1208 args = (self._emerge_queue, self._job_queue, emerge, package_db) |
1145 self._pool = multiprocessing.Pool(procs, EmergeWorker, args) | 1209 self._pool = multiprocessing.Pool(procs, EmergeWorker, args) |
1146 | 1210 |
1147 # Initialize the failed queue to empty. | 1211 # Initialize the failed queue to empty. |
1148 self._retry_queue = [] | 1212 self._retry_queue = [] |
1149 self._failed = {} | 1213 self._failed = set() |
1150 | 1214 |
1151 # Print an update before we launch the merges. | 1215 # Print an update before we launch the merges. |
1152 self._Status() | 1216 self._Status() |
1153 | 1217 |
| 1218 # Setup an exit handler so that we print nice messages if we are |
| 1219 # terminated. |
| 1220 self._SetupExitHandler() |
| 1221 |
| 1222 # Schedule our jobs. |
1154 for target, info in deps_map.items(): | 1223 for target, info in deps_map.items(): |
1155 if not info["needs"]: | 1224 if not info["needs"]: |
1156 self._Schedule(target) | 1225 self._Schedule(target) |
1157 | 1226 |
| 1227 def _SetupExitHandler(self): |
| 1228 |
| 1229 def ExitHandler(signum, frame): |
| 1230 |
| 1231 # Kill our signal handlers so we don't get called recursively |
| 1232 signal.signal(signal.SIGINT, signal.SIG_DFL) |
| 1233 signal.signal(signal.SIGTERM, signal.SIG_DFL) |
| 1234 |
| 1235 # Print our current job status |
| 1236 for target, job in self._jobs.iteritems(): |
| 1237 if job: |
| 1238 self._PrintJob(job) |
| 1239 os.unlink(job.filename) |
| 1240 |
| 1241 # Notify the user that we are exiting |
| 1242 print "Exiting on signal %s" % signum |
| 1243 sys.exit(1) |
| 1244 |
| 1245 # Print out job status when we are killed |
| 1246 signal.signal(signal.SIGINT, ExitHandler) |
| 1247 signal.signal(signal.SIGTERM, ExitHandler) |
| 1248 |
1158 def _Schedule(self, target): | 1249 def _Schedule(self, target): |
1159 # We maintain a tree of all deps; if this target doesn't need | 1250 # We maintain a tree of all deps; if this target doesn't need |
1160 # to be installed, just free up its children and continue. | 1251 # to be installed, just free up its children and continue. |
1161 # It is possible to reinstall deps of deps, without reinstalling | 1252 # It is possible to reinstall deps of deps, without reinstalling |
1162 # first level deps, like so: | 1253 # first level deps, like so: |
1163 # chromeos (merge) -> eselect (nomerge) -> python (merge) | 1254 # chromeos (merge) -> eselect (nomerge) -> python (merge) |
1164 if self._deps_map[target]["action"] == "nomerge": | 1255 if self._deps_map[target]["action"] == "nomerge": |
1165 self._Finish(target) | 1256 self._Finish(target) |
1166 else: | 1257 else: |
1167 # Kick off the build if it's marked to be built. | 1258 # Kick off the build if it's marked to be built. |
1168 self._jobs.add(target) | 1259 self._jobs[target] = None |
1169 self._emerge_queue.put(target) | 1260 self._emerge_queue.put(target) |
1170 | 1261 |
1171 def _LoadAvg(self): | 1262 def _LoadAvg(self): |
1172 loads = open("/proc/loadavg", "r").readline().split()[:3] | 1263 loads = open("/proc/loadavg", "r").readline().split()[:3] |
1173 return " ".join(loads) | 1264 return " ".join(loads) |
1174 | 1265 |
| 1266 def _PrintJob(self, job): |
| 1267 """Print output so far of specified job""" |
| 1268 |
| 1269 # Calculate how long the job has been running. |
| 1270 current_time = time.time() |
| 1271 seconds = current_time - job.start_timestamp |
| 1272 |
| 1273 # Record that we printed output for this job at this time. |
| 1274 job.last_output_timestamp = current_time |
| 1275 |
| 1276 # Build a short description of the job and how long it has been running. |
| 1277 info = "job %s (%dm%.1fs)" % (job.pkgname, seconds / 60, seconds % 60) |
| 1278 if job.last_output_seek: |
| 1279 print "=== Continue output for %s ===" % info |
| 1280 else: |
| 1281 print "=== Start output for %s ===" % info |
| 1282 |
| 1283 # Print actual output from job |
| 1284 f = codecs.open(job.filename, encoding='utf-8', errors='replace') |
| 1285 f.seek(job.last_output_seek) |
| 1286 prefix = job.pkgname + ":" |
| 1287 for line in f: |
| 1288 |
| 1289 # Save off our position in the file |
| 1290 if line and line[-1] == "\n": |
| 1291 job.last_output_seek = f.tell() |
| 1292 line = line[:-1] |
| 1293 |
| 1294 # Print our line |
| 1295 print prefix, line.encode('utf-8', 'replace') |
| 1296 f.close() |
| 1297 |
| 1298 # Note end of output section |
| 1299 if job.done: |
| 1300 print "=== Complete: %s ===" % info |
| 1301 else: |
| 1302 print "=== Still running: %s ===" % info |
| 1303 |
| 1304 |
1175 def _Status(self): | 1305 def _Status(self): |
1176 """Print status.""" | 1306 """Print status.""" |
1177 seconds = time.time() - GLOBAL_START | 1307 current_time = time.time() |
| 1308 seconds = current_time - GLOBAL_START |
1178 line = ("Pending %s, Ready %s, Running %s, Retrying %s, Total %s " | 1309 line = ("Pending %s, Ready %s, Running %s, Retrying %s, Total %s " |
1179 "[Time %dm%.1fs Load %s]") | 1310 "[Time %dm%.1fs Load %s]") |
1180 qsize = self._emerge_queue.qsize() | 1311 qsize = self._emerge_queue.qsize() |
1181 print line % (len(self._deps_map), qsize, len(self._jobs) - qsize, | 1312 print line % (len(self._deps_map), qsize, len(self._jobs) - qsize, |
1182 len(self._retry_queue), self._total_jobs, | 1313 len(self._retry_queue), self._total_jobs, |
1183 seconds / 60, seconds % 60, self._LoadAvg()) | 1314 seconds / 60, seconds % 60, self._LoadAvg()) |
1184 | 1315 |
| 1316 # Print interim output every minute if --show-output is used. Otherwise, |
| 1317 # only print output if a job has gone an hour without any output printed. |
| 1318 if self._show_output: |
| 1319 interval = 60 |
| 1320 else: |
| 1321 interval = 60 * 60 |
| 1322 for target, job in self._jobs.iteritems(): |
| 1323 if job: |
| 1324 last_timestamp = max(job.start_timestamp, job.last_output_timestamp) |
| 1325 if last_timestamp + interval < current_time: |
| 1326 self._PrintJob(job) |
| 1327 |
1185 def _Finish(self, target): | 1328 def _Finish(self, target): |
1186 """Mark a target as completed and unblock dependecies.""" | 1329 """Mark a target as completed and unblock dependecies.""" |
1187 for dep in self._deps_map[target]["provides"]: | 1330 for dep in self._deps_map[target]["provides"]: |
1188 del self._deps_map[dep]["needs"][target] | 1331 del self._deps_map[dep]["needs"][target] |
1189 if not self._deps_map[dep]["needs"]: | 1332 if not self._deps_map[dep]["needs"]: |
1190 self._Schedule(dep) | 1333 self._Schedule(dep) |
1191 self._deps_map.pop(target) | 1334 self._deps_map.pop(target) |
1192 | 1335 |
1193 def _Retry(self): | 1336 def _Retry(self): |
1194 if self._retry_queue: | 1337 if self._retry_queue: |
1195 target = self._retry_queue.pop(0) | 1338 target = self._retry_queue.pop(0) |
1196 self._Schedule(target) | 1339 self._Schedule(target) |
1197 print "Retrying emerge of %s." % target | 1340 print "Retrying emerge of %s." % target |
1198 | 1341 |
1199 def Run(self): | 1342 def Run(self): |
1200 """Run through the scheduled ebuilds. | 1343 """Run through the scheduled ebuilds. |
1201 | 1344 |
1202 Keep running so long as we have uninstalled packages in the | 1345 Keep running so long as we have uninstalled packages in the |
1203 dependency graph to merge. | 1346 dependency graph to merge. |
1204 """ | 1347 """ |
1205 while self._deps_map: | 1348 while self._deps_map: |
1206 # Check here that we are actually waiting for something. | 1349 # Check here that we are actually waiting for something. |
1207 if (self._emerge_queue.empty() and | 1350 if (self._emerge_queue.empty() and |
1208 self._done_queue.empty() and | 1351 self._job_queue.empty() and |
1209 not self._jobs and | 1352 not self._jobs and |
1210 self._deps_map): | 1353 self._deps_map): |
1211 # If we have failed on a package, retry it now. | 1354 # If we have failed on a package, retry it now. |
1212 if self._retry_queue: | 1355 if self._retry_queue: |
1213 self._Retry() | 1356 self._Retry() |
1214 # If we have failed a package twice, just give up. | 1357 # If we have failed a package twice, just give up. |
1215 elif self._failed: | 1358 elif self._failed: |
1216 for failure, output in self._failed.items(): | 1359 for failure in self._failed: |
1217 print "Package failed: %s" % failure | 1360 print "Package failed: %s" % failure |
1218 print output | |
1219 PrintDepsMap(self._deps_map) | 1361 PrintDepsMap(self._deps_map) |
1220 print "Packages failed: %s" % " ,".join(self._failed.keys()) | 1362 print "Packages failed: %s" % " ,".join(self._failed) |
1221 sys.exit(1) | 1363 sys.exit(1) |
1222 # If we have dependency cycles. | 1364 # If we have dependency cycles. |
1223 else: | 1365 else: |
1224 print "Deadlock! Circular dependencies!" | 1366 print "Deadlock! Circular dependencies!" |
1225 PrintDepsMap(self._deps_map) | 1367 PrintDepsMap(self._deps_map) |
1226 sys.exit(1) | 1368 sys.exit(1) |
1227 | 1369 |
1228 try: | 1370 try: |
1229 target, retcode, output = self._done_queue.get(timeout=5) | 1371 job = self._job_queue.get(timeout=5) |
1230 except Queue.Empty: | 1372 except Queue.Empty: |
1231 # Print an update. | 1373 # Print an update. |
1232 self._Status() | 1374 self._Status() |
1233 continue | 1375 continue |
1234 | 1376 |
1235 self._jobs.discard(target) | 1377 target = job.target |
1236 | 1378 |
1237 # Print if necessary. | 1379 if not job.done: |
1238 if retcode != 0: | 1380 self._jobs[target] = job |
1239 print output | 1381 print "Started %s (logged in %s)" % (target, job.filename) |
1240 if retcode != 0: | 1382 continue |
| 1383 |
| 1384 # Print output of job |
| 1385 if self._show_output or job.retcode != 0: |
| 1386 self._PrintJob(job) |
| 1387 os.unlink(job.filename) |
| 1388 del self._jobs[target] |
| 1389 |
| 1390 seconds = time.time() - job.start_timestamp |
| 1391 details = "%s (in %dm%.1fs)" % (target, seconds / 60, seconds % 60) |
| 1392 |
| 1393 # Complain if necessary. |
| 1394 if job.retcode != 0: |
1241 # Handle job failure. | 1395 # Handle job failure. |
1242 if target in self._failed: | 1396 if target in self._failed: |
1243 # If this job has failed previously, give up. | 1397 # If this job has failed previously, give up. |
1244 print "Failed %s. Your build has failed." % target | 1398 print "Failed %s. Your build has failed." % details |
1245 else: | 1399 else: |
1246 # Queue up this build to try again after a long while. | 1400 # Queue up this build to try again after a long while. |
1247 self._retry_queue.append(target) | 1401 self._retry_queue.append(target) |
1248 self._failed[target] = 1 | 1402 self._failed.add(target) |
1249 print "Failed %s, retrying later." % target | 1403 print "Failed %s, retrying later." % details |
1250 else: | 1404 else: |
1251 if target in self._failed and self._retry_queue: | 1405 if target in self._failed and self._retry_queue: |
1252 # If we have successfully retried a failed package, and there | 1406 # If we have successfully retried a failed package, and there |
1253 # are more failed packages, try the next one. We will only have | 1407 # are more failed packages, try the next one. We will only have |
1254 # one retrying package actively running at a time. | 1408 # one retrying package actively running at a time. |
1255 self._Retry() | 1409 self._Retry() |
1256 | 1410 |
1257 print "Completed %s" % target | 1411 print "Completed %s" % details |
1258 # Mark as completed and unblock waiting ebuilds. | 1412 # Mark as completed and unblock waiting ebuilds. |
1259 self._Finish(target) | 1413 self._Finish(target) |
1260 | 1414 |
1261 # Print an update. | 1415 # Print an update. |
1262 self._Status() | 1416 self._Status() |
1263 | 1417 |
1264 | 1418 |
1265 def main(): | 1419 def main(): |
1266 | 1420 |
1267 deps = DepGraphGenerator() | 1421 deps = DepGraphGenerator() |
(...skipping 30 matching lines...) |
1298 deps.PrintTree(deps_tree) | 1452 deps.PrintTree(deps_tree) |
1299 | 1453 |
1300 deps_graph = deps.GenDependencyGraph(deps_tree, deps_info) | 1454 deps_graph = deps.GenDependencyGraph(deps_tree, deps_info) |
1301 | 1455 |
1302 # OK, time to print out our progress so far. | 1456 # OK, time to print out our progress so far. |
1303 deps.PrintInstallPlan(deps_graph) | 1457 deps.PrintInstallPlan(deps_graph) |
1304 if "--tree" in emerge.opts: | 1458 if "--tree" in emerge.opts: |
1305 PrintDepsMap(deps_graph) | 1459 PrintDepsMap(deps_graph) |
1306 | 1460 |
1307 # Run the queued emerges. | 1461 # Run the queued emerges. |
1308 scheduler = EmergeQueue(deps_graph, emerge, deps.package_db) | 1462 scheduler = EmergeQueue(deps_graph, emerge, deps.package_db, deps.show_output) |
1309 scheduler.Run() | 1463 scheduler.Run() |
1310 | 1464 |
1311 # Update world. | 1465 # Update world. |
1312 if ("--oneshot" not in emerge.opts and | 1466 if ("--oneshot" not in emerge.opts and |
1313 "--pretend" not in emerge.opts): | 1467 "--pretend" not in emerge.opts): |
1314 world_set = emerge.root_config.sets["selected"] | 1468 world_set = emerge.root_config.sets["selected"] |
1315 new_world_pkgs = [] | 1469 new_world_pkgs = [] |
1316 root = emerge.settings["ROOT"] | 1470 root = emerge.settings["ROOT"] |
1317 final_db = emerge.depgraph._dynamic_config.mydbapi[root] | 1471 final_db = emerge.depgraph._dynamic_config.mydbapi[root] |
1318 for pkg in emerge.cmdline_packages: | 1472 for pkg in emerge.cmdline_packages: |
1319 for db_pkg in final_db.match_pkgs(pkg): | 1473 for db_pkg in final_db.match_pkgs(pkg): |
1320 print "Adding %s to world" % db_pkg.cp | 1474 print "Adding %s to world" % db_pkg.cp |
1321 new_world_pkgs.append(db_pkg.cp) | 1475 new_world_pkgs.append(db_pkg.cp) |
1322 if new_world_pkgs: | 1476 if new_world_pkgs: |
1323 world_set.update(new_world_pkgs) | 1477 world_set.update(new_world_pkgs) |
1324 | 1478 |
1325 # Update environment (library cache, symlinks, etc.) | 1479 # Update environment (library cache, symlinks, etc.) |
1326 if deps.board and "--pretend" not in emerge.opts: | 1480 if deps.board and "--pretend" not in emerge.opts: |
1327 portage.env_update() | 1481 portage.env_update() |
1328 | 1482 |
1329 print "Done" | 1483 print "Done" |
1330 | 1484 |
1331 if __name__ == "__main__": | 1485 if __name__ == "__main__": |
1332 main() | 1486 main() |