Index: build/google_storage_tools/upload_to_google_storage.py
diff --git a/build/google_storage_tools/upload_to_google_storage.py b/build/google_storage_tools/upload_to_google_storage.py
new file mode 100755
index 0000000000000000000000000000000000000000..5f98018689b719630e70c3826e6a68a5f9412442
--- /dev/null
+++ b/build/google_storage_tools/upload_to_google_storage.py
@@ -0,0 +1,294 @@
+#!/usr/bin/env python
+# Copyright (c) 2012 The Chromium Authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+"""Uploads files to Google Storage and writes .sha1 stamp files for them."""
+
+import hashlib
+import optparse
+import os
+import Queue
+import re
+import subprocess
+import sys
+import threading
+import time
+
+# TODO(hinoka): This is currently incorrect. Should find a better default.
+GSUTIL_DEFAULT_PATH = os.path.join(os.path.dirname(os.path.normpath(__file__)),
+    '..', '..', 'third_party', 'gsutil', 'gsutil')
+
+USAGE_STRING = """%prog [options] target [target2 ...].
+Target is the file intended to be uploaded to Google Storage.
+If target is "-", then a list of files will be taken from standard input.
+
+This script will generate a file (original filename).sha1 containing the
+SHA-1 sum of the uploaded file.
+It is recommended that the .sha1 file is checked into the repository,
+the original file removed from the repository, and a hook added to the
+DEPS file to call download_from_google_storage.py.
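+
+For example, the DEPS hook might look something like this (illustrative
+only; see download_from_google_storage.py for its actual interface):
+  hooks = [
+    {
+      "pattern": ".",
+      "action": ["python",
+                 "build/google_storage_tools/download_from_google_storage.py"],
+    },
+  ]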
+
+Example usages
+--------------
+
+Scan the current directory and upload all files larger than 1MB:
+find . -name .svn -prune -o -size +1000k -type f -print0 | %prog -0 -
+"""
+
+
+class Gsutil(object):
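+  """Thin wrapper around gsutil with an optional boto file and timeout."""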
+  def __init__(self, path, boto_path=None, timeout=None):
+    if not os.path.exists(path):
+      raise OSError('GSUtil not found in %s' % path)
+    self.path = path
+
+    self.timeout = timeout
+    self.boto_path = boto_path
+
+  def call(self, *args):
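+    """Run gsutil with args, enforcing self.timeout. Returns the exit code."""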
+    def _thread_main():
+      thr = threading.current_thread()
+      env = os.environ.copy()
+      if self.boto_path is not None:
+        env['AWS_CREDENTIAL_FILE'] = self.boto_path
+      thr.status = subprocess.call((sys.executable, self.path) + args, env=env)
+    thr = threading.Thread(target=_thread_main)
+    thr.start()
+    thr.join(self.timeout)
+    if thr.isAlive():
+      raise RuntimeError('%s %s timed out after %d seconds.' % (
+          self.path, ' '.join(args), self.timeout))
+    return thr.status
+
+  def check_call(self, *args):
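+    """Run gsutil, capturing output; returns (code, stdout, stderr)."""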
+    def _thread_main():
+      thr = threading.current_thread()
+      env = os.environ.copy()
+      if self.boto_path is not None:
+        env['AWS_CREDENTIAL_FILE'] = self.boto_path
+      p = subprocess.Popen((sys.executable, self.path) + args,
+                           stdout=subprocess.PIPE,
+                           stderr=subprocess.PIPE,
+                           env=env)
+      # communicate() drains both pipes; calling wait() first can deadlock
+      # when the child fills a pipe buffer.
+      out, err = p.communicate()
+      thr.status = (p.returncode, out, err)
+
+    thr = threading.Thread(target=_thread_main)
+    thr.start()
+    thr.join(self.timeout)
+    if thr.isAlive():
+      raise RuntimeError('%s %s timed out after %d seconds.' % (
+          self.path, ' '.join(args), self.timeout))
+    code, out, err = thr.status
+    status_code_match = re.search('status=([0-9]+)', err)
+    if status_code_match:
+      return (int(status_code_match.group(1)), out, err)
+    elif ('You are attempting to access protected data with '
+          'no configured credentials.' in err):
+      return (403, out, err)
+    elif 'No such object' in err:
+      return (404, out, err)
+    else:
+      return (code, out, err)
+
+  def clone(self):
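+    """Return a new Gsutil with the same settings, for use on another thread."""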
+    return Gsutil(self.path, self.boto_path, self.timeout)
+
+
+def GetSHA1(filename):
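+  """Return the SHA-1 hex digest of filename."""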
+  sha1 = hashlib.sha1()
+  with open(filename, 'rb') as f:
+    while True:
+      # Read in 1MB chunks so the whole file isn't loaded into memory at once.
+      chunk = f.read(1024*1024)
+      if not chunk:
+        break
+      sha1.update(chunk)
+  return sha1.hexdigest()
+
+
+def CheckSHA1(sha1_sum, filename):
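+  """Return True if filename's SHA-1 matches sha1_sum."""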
+  return sha1_sum == GetSHA1(filename)
+
+
+def GetMD5(filename, lock, use_md5):
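+  """Return filename's MD5, optionally cached in a .md5 stamp file."""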
+  # See if we can find an existing MD5 sum stored in a file.
+  if use_md5 and os.path.exists('%s.md5' % filename):
+    with open('%s.md5' % filename) as f:
+      md5_match = re.search('([a-z0-9]{32})', f.read())
+      if md5_match:
+        return md5_match.groups()[0]
+
+  # Calculate the MD5 checksum of the file.
+  md5_calculator = hashlib.md5()
+  with lock:
+    with open(filename, 'rb') as f:
+      while True:
+        chunk = f.read(1024*1024)
+        if not chunk:
+          break
+        md5_calculator.update(chunk)
+  local_md5 = md5_calculator.hexdigest()
+  if use_md5:
+    with open('%s.md5' % filename, 'w') as f:
+      f.write(local_md5)
+  return local_md5
+
+
+def _upload_worker(thread_num, q, base_url, gsutil, options, md5_lock):
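+  """Upload (filename, sha1) pairs from q to base_url until q is empty."""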
+  while True:
+    try:
+      filename, sha1_sum = q.get_nowait()
+      file_url = '%s/%s' % (base_url, sha1_sum)
+      if gsutil.check_call('ls', file_url)[0] == 0 and not options.force:
+        # File exists, check MD5 hash.
+        _, out, _ = gsutil.check_call('ls', '-L', file_url)
+        etag_match = re.search(r'ETag:\s+([a-z0-9]{32})', out)
+        if etag_match:
+          remote_md5 = etag_match.groups()[0]
+          # Calculate the MD5 checksum to match it to Google Storage's ETag.
+          local_md5 = GetMD5(filename, md5_lock, options.use_md5)
+          if local_md5 == remote_md5:
+            print ('File %s already exists at %s and MD5 matches, skipping' %
+                (filename, file_url))
+            continue
+      print 'Uploading %s to %s' % (filename, file_url)
+      code, _, err = gsutil.check_call('cp', '-q', filename, file_url)
+      if code != 0:
+        print >>sys.stderr, err
+        continue
+    except Queue.Empty:
+      return
+
+
+def main(args):
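+  """Parse options, hash the target files, and upload them in parallel."""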
+  parser = optparse.OptionParser(USAGE_STRING)
+  parser.add_option('-b', '--bucket', default='chrome-artifacts',
+                    help='Google Storage bucket to upload to.')
+  parser.add_option('-e', '--boto', default=None,
+                    help='Specify a custom boto file.')
+  parser.add_option('-f', '--force', action='store_true', default=False,
+                    help='Force upload even if remote file exists.')
+  parser.add_option('-g', '--gsutil_path', default=GSUTIL_DEFAULT_PATH,
+                    help='Path to the gsutil script.')
+  parser.add_option('-m', '--use_md5', action='store_true', default=False,
+                    help='Generate MD5 files when scanning, and don\'t check '
+                         'the MD5 checksum if a .md5 file is found.')
+  parser.add_option('-t', '--num_threads', default=1, type='int',
+                    help='Number of uploader threads to run.')
+  parser.add_option('-s', '--skip_hashing', action='store_true', default=False,
+                    help='Skip hashing if .sha1 file exists.')
+  parser.add_option('-0', '--use_null_terminator', action='store_true',
+                    default=False, help='Use \\0 instead of \\n when parsing '
+                    'the file list from stdin. This is useful if the input '
+                    'is coming from "find ... -print0".')
+  (options, args) = parser.parse_args(args[1:])
+
+  if len(args) < 1:
+    parser.error('Missing target.')
+  elif len(args) == 1 and args[0] == '-':
+    # Take stdin as a newline- or null-separated list of files.
+    if options.use_null_terminator:
+      input_filenames = filter(None, sys.stdin.read().split('\0'))
+    else:
+      input_filenames = [line.strip() for line in sys.stdin.readlines()]
+  else:
+    input_filenames = args
+  base_url = 'gs://%s' % options.bucket
+
+  # Make sure we can find a working instance of gsutil.
+  if os.path.exists(options.gsutil_path):
+    gsutil = Gsutil(options.gsutil_path, boto_path=options.boto)
+  else:
+    gsutil = None
+    for path in os.environ["PATH"].split(os.pathsep):
+      if os.path.exists(path) and 'gsutil' in os.listdir(path):
+        gsutil = Gsutil(os.path.join(path, 'gsutil'), boto_path=options.boto)
+        break
+    if gsutil is None:
+      print >>sys.stderr, ('gsutil not found at %s or anywhere on PATH.' %
+          options.gsutil_path)
+      return 1
+
+  # Check if we have permissions to the Google Storage bucket.
+  code, _, ls_err = gsutil.check_call('ls', base_url)
+  if code == 403:
+    code, _, _ = gsutil.call('config')
+    if code != 0:
+      print >>sys.stderr, 'Error while authenticating to %s, exiting' % base_url
+      return 403
+  elif code == 404:
+    print >>sys.stderr, '%s not found.' % base_url
+    return 404
+  elif code != 0:
+    print >>sys.stderr, ls_err
+    return code
+
+  # We want to hash everything in a single thread since it's faster.
+  # The bottleneck is in disk IO, not CPU.
+  upload_queue = Queue.Queue()
+  hash_timer = time.time()
+  for filename in input_filenames:
+    if not os.path.exists(filename):
+      print 'Error: %s not found, skipping.' % filename
+      continue
+    if os.path.exists('%s.sha1' % filename) and options.skip_hashing:
+      print 'Found hash for %s, skipping.' % filename
+      with open('%s.sha1' % filename) as f:
+        upload_queue.put((filename, f.read().strip()))
+      continue
+    print 'Calculating hash for %s...' % filename,
+    sha1_sum = GetSHA1(filename)
+    with open(filename + '.sha1', 'w') as f:
+      f.write(sha1_sum)
+    print 'done'
+    upload_queue.put((filename, sha1_sum))
+  hash_time = time.time() - hash_timer
+
+  # Start up all the worker threads.
+  all_threads = []
+
+  # We only want one MD5 calculation happening at a time.
+  md5_lock = threading.Lock()
+  upload_timer = time.time()
+
+  for thread_num in range(options.num_threads):
+    t = threading.Thread(target=_upload_worker, args=[thread_num,
+        upload_queue, base_url, gsutil.clone(), options, md5_lock])
+    t.daemon = True
+    t.start()
+    all_threads.append(t)
+
+  # Wait for everything to finish.
+  for t in all_threads:
+    t.join()
+
+  print 'Success.'
+  print 'Hashing %d files took %.1f seconds' % (len(input_filenames), hash_time)
+  print 'Uploading took %.1f seconds' % (time.time() - upload_timer)
+  return 0
+
+if __name__ == '__main__':
+  sys.exit(main(sys.argv))