| OLD | NEW |
| 1 #!/usr/bin/python | 1 #!/usr/bin/python |
| 2 # Copyright (c) 2010 The Chromium OS Authors. All rights reserved. | 2 # Copyright (c) 2010 The Chromium OS Authors. All rights reserved. |
| 3 # Use of this source code is governed by a BSD-style license that can be | 3 # Use of this source code is governed by a BSD-style license that can be |
| 4 # found in the LICENSE file. | 4 # found in the LICENSE file. |
| 5 | 5 |
| 6 import datetime | 6 import datetime |
| 7 import multiprocessing | 7 import multiprocessing |
| 8 import optparse | 8 import optparse |
| 9 import os | 9 import os |
| 10 import re | 10 import re |
| (...skipping 194 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... |
| 205 def RemoteUpload(files, pool=10): | 205 def RemoteUpload(files, pool=10): |
| 206 """Upload to google storage. | 206 """Upload to google storage. |
| 207 | 207 |
| 208 Create a pool of processes and call _GsUpload with the proper arguments. | 208 Create a pool of processes and call _GsUpload with the proper arguments. |
| 209 | 209 |
| 210 Args: | 210 Args: |
| 211 files: dictionary with keys to local files and values to remote path. | 211 files: dictionary with keys to local files and values to remote path. |
| 212 pool: integer of maximum processes to have at the same time. | 212 pool: integer of maximum processes to have at the same time. |
| 213 | 213 |
| 214 Returns: | 214 Returns: |
| 215 Return a list of tuple arguments of the failed uploads | 215 Return a set of tuple arguments of the failed uploads |
| 216 """ | 216 """ |
| 217 # TODO(scottz) port this to use _RunManyParallel when it is available in | 217 # TODO(scottz) port this to use _RunManyParallel when it is available in |
| 218 # cros_build_lib | 218 # cros_build_lib |
| 219 pool = multiprocessing.Pool(processes=pool) | 219 pool = multiprocessing.Pool(processes=pool) |
| 220 workers = [] | 220 workers = [] |
| 221 for local_file, remote_path in files.iteritems(): | 221 for local_file, remote_path in files.iteritems(): |
| 222 workers.append((local_file, remote_path)) | 222 workers.append((local_file, remote_path)) |
| 223 | 223 |
| 224 result = pool.map_async(_GsUpload, workers, chunksize=1) | 224 result = pool.map_async(_GsUpload, workers, chunksize=1) |
| 225 while True: | 225 while True: |
| 226 try: | 226 try: |
| 227 return result.get(60*60) | 227 return set(result.get(60*60)) |
| 228 except multiprocessing.TimeoutError: | 228 except multiprocessing.TimeoutError: |
| 229 pass | 229 pass |
| 230 | 230 |
| 231 | 231 |
| 232 def GenerateUploadDict(local_path, gs_path, strip_str): | 232 def GenerateUploadDict(local_path, gs_path, strip_str): |
| 233 """Build a dictionary of local remote file key pairs for gsutil to upload. | 233 """Build a dictionary of local remote file key pairs for gsutil to upload. |
| 234 | 234 |
| 235 Args: | 235 Args: |
| 236 local_path: A path to the file on the local hard drive. | 236 local_path: A path to the file on the local hard drive. |
| 237 gs_path: Path to upload in Google Storage. | 237 gs_path: Path to upload in Google Storage. |
| (...skipping 38 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... |
| 276 package_path = os.path.join(board_path, 'packages') | 276 package_path = os.path.join(board_path, 'packages') |
| 277 package_string = board | 277 package_string = board |
| 278 strip_pattern = board_path | 278 strip_pattern = board_path |
| 279 gs_path = os.path.join(bucket, _GS_BOARD_PATH % {'board': board, | 279 gs_path = os.path.join(bucket, _GS_BOARD_PATH % {'board': board, |
| 280 'version': version}) | 280 'version': version}) |
| 281 | 281 |
| 282 upload_files = GenerateUploadDict(package_path, gs_path, strip_pattern) | 282 upload_files = GenerateUploadDict(package_path, gs_path, strip_pattern) |
| 283 | 283 |
| 284 print 'Uploading %s' % package_string | 284 print 'Uploading %s' % package_string |
| 285 failed_uploads = RemoteUpload(upload_files) | 285 failed_uploads = RemoteUpload(upload_files) |
| 286 if failed_uploads: | 286 if len(failed_uploads) > 1 or (None not in failed_uploads): |
| 287 raise UploadFailed('Error uploading:\n%s' % '\n'.join(failed_uploads)) | 287 error_msg = ['%s -> %s\n' % args for args in failed_uploads] |
| 288 raise UploadFailed('Error uploading:\n%s' % error_msg) |
| 288 | 289 |
| 289 if git_file: | 290 if git_file: |
| 290 RevGitFile(git_file, package_string, version) | 291 RevGitFile(git_file, package_string, version) |
| 291 | 292 |
| 292 | 293 |
| 293 def usage(parser, msg): | 294 def usage(parser, msg): |
| 294 """Display usage message and parser help then exit with 1.""" | 295 """Display usage message and parser help then exit with 1.""" |
| 295 print >> sys.stderr, msg | 296 print >> sys.stderr, msg |
| 296 parser.print_help() | 297 parser.print_help() |
| 297 sys.exit(1) | 298 sys.exit(1) |
| (...skipping 37 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... |
| 335 if options.sync_host: | 336 if options.sync_host: |
| 336 UploadPrebuilt(options.build_path, options.upload, git_file=git_file) | 337 UploadPrebuilt(options.build_path, options.upload, git_file=git_file) |
| 337 | 338 |
| 338 if options.board: | 339 if options.board: |
| 339 UploadPrebuilt(options.build_path, options.upload, board=options.board, | 340 UploadPrebuilt(options.build_path, options.upload, board=options.board, |
| 340 git_file=git_file) | 341 git_file=git_file) |
| 341 | 342 |
| 342 | 343 |
| 343 if __name__ == '__main__': | 344 if __name__ == '__main__': |
| 344 main() | 345 main() |
| OLD | NEW |