| Index: build/upload_to_google_storage.py
|
| diff --git a/build/upload_to_google_storage.py b/build/upload_to_google_storage.py
|
| new file mode 100755
|
| index 0000000000000000000000000000000000000000..592cd5d3350f7925a50a7bb216f6a3fb7eb6e7cb
|
| --- /dev/null
|
| +++ b/build/upload_to_google_storage.py
|
| @@ -0,0 +1,198 @@
|
| +#!/usr/bin/env python
|
| +# Copyright (c) 2012 The Chromium Authors. All rights reserved.
|
| +# Use of this source code is governed by a BSD-style license that can be
|
| +# found in the LICENSE file.
|
| +"""Script to download files from Google Storage."""
|
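| +#
|
| +# Example usage (illustrative): pass a file to upload, or '-' to read a
|
| +# newline-separated list of files from stdin:
|
| +#
|
| +#   upload_to_google_storage.py path/to/some_file
|
| +#   ls *.zip | upload_to_google_storage.py -
|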
| +import hashlib
|
| +import os
|
| +import Queue
|
| +import re
|
| +import subprocess
|
| +import sys
|
| +import threading
|
| +import time
|
| +
|
| +from optparse import OptionParser
|
| +
|
| +GSUTIL_DEFAULT_PATH = os.path.join(os.path.dirname(os.path.normpath(__file__)),
|
| + '..', '..', 'third_party', 'gsutil', 'gsutil')
|
| +
|
| +
|
| +class Gsutil(object):
|
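| +  """Thin wrapper around the gsutil command-line tool."""
|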
| + def __init__(self, path):
|
| + if os.path.exists(path):
|
| + self.path = path
|
| + else:
|
| + raise IOError('GSUtil not found in %s' % path)
|
| +
|
| + def call(self, *args):
|
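| +    # Runs gsutil with output streamed straight to the console, so the
|
| +    # user can respond to any interactive prompts.
|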
| + p = subprocess.Popen((sys.executable, self.path) + args)
|
| + return p.wait()
|
| +
|
| + def check_call(self, *args):
|
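| +    # Runs gsutil with stdout/stderr captured on self.stdout/self.stderr,
|
| +    # mapping common failures in the output to HTTP-style codes (403, 404).
|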
| + p = subprocess.Popen((sys.executable, self.path) + args,
|
| + stdout=subprocess.PIPE,
|
| + stderr=subprocess.PIPE)
|
| +    # communicate() must run before reading the return code; waiting first
|
| +    # can deadlock once the stdout/stderr pipes fill up.
|
| +    out, err = p.communicate()
|
| +    code = p.returncode
|
| + self.stdout = out
|
| + self.stderr = err
|
| +
|
| + if code == 0:
|
| + return 0
|
| +
|
| + status_code_match = re.search('status=([0-9]+)', err)
|
| + if status_code_match:
|
| +      return int(status_code_match.group(1))
|
| + elif ('You are attempting to access protected data with '
|
| + 'no configured credentials.' in err):
|
| + return 403
|
| + elif 'No such object' in err:
|
| + return 404
|
| + else:
|
| + return code
|
| +
|
| +
|
| +def CheckSHA1(sha1_sum, filename):
|
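| +  """Returns True if the contents of filename hash to sha1_sum."""
|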
| + sha1 = hashlib.sha1()
|
| +  sha1.update(open(filename, 'rb').read())
|
| + return sha1_sum == sha1.hexdigest()
|
| +
|
| +def _upload_worker(thread_num, q, base_url, gsutil, options, md5_lock):
|
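| +  """Pulls (filename, sha1_sum) pairs off q and uploads them to base_url."""
|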
| + while True:
|
| + try:
|
| + filename, sha1_sum = q.get_nowait()
|
| + file_url = '%s/%s' % (base_url, sha1_sum)
|
| +      if gsutil.check_call('ls', file_url) == 0 and not options.force:
|
| +        # File exists; compare the local MD5 against the remote ETag.
|
| +        gsutil.check_call('ls', '-L', file_url)
|
| +        etag_match = re.search(r'ETag:\s+([a-z0-9]{32})', gsutil.stdout)
|
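| +        # For non-composite objects, the Google Storage ETag is the MD5
|
| +        # digest, so it can be compared against a locally computed MD5.
|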
| + if etag_match:
|
| + remote_md5 = etag_match.groups()[0]
|
| + md5_calculator = hashlib.md5()
|
| + with md5_lock:
|
| +            md5_calculator.update(open(filename, 'rb').read())
|
| + local_md5 = md5_calculator.hexdigest()
|
| + if local_md5 == remote_md5:
|
| +            print ('File already exists at %s and MD5 matches, skipping' %
|
| +                   file_url)
|
| + continue
|
| + print 'Uploading %s to %s' % (filename, file_url)
|
| +      code = gsutil.check_call('cp', '-q', filename, file_url)
|
| +      if code != 0:
|
| +        print >>sys.stderr, gsutil.stderr
|
| +        continue
|
| +      if options.delete:
|
| +        # Honor --delete as documented: remove the local file on success.
|
| +        os.remove(filename)
|
| + except Queue.Empty:
|
| + return
|
| +
|
| +def main(args):
|
| + parser = OptionParser()
|
| + parser.add_option('-d', '--delete', action='store_true', default=False,
|
| + help='Deletes the target file after upload.')
|
| + parser.add_option('-b', '--bucket', default='chrome-artifacts',
|
| +                    help='Google Storage bucket to upload to.')
|
| + parser.add_option('-f', '--force', action='store_true', default=False,
|
| + help='Force upload even if remote file exists.')
|
| + parser.add_option('-g', '--gsutil_path', default=GSUTIL_DEFAULT_PATH,
|
| + help='Path to the gsutil script.')
|
| + parser.add_option('-t', '--num_threads', default=1, type='int',
|
| + help='Number of uploader threads to run.')
|
| + parser.add_option('-s', '--skip_hashing', action='store_true', default=False,
|
| + help='Skip hashing if .sha1 file exists.')
|
| +  (options, args) = parser.parse_args(args[1:])
|
| +
|
| + if len(args) < 1:
|
| + print >>sys.stderr, 'Missing target.'
|
| + return 1
|
| + else:
|
| + input_filename = args[0]
|
| + base_url = 'gs://%s' % options.bucket
|
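| +  # Find gsutil: prefer the explicit --gsutil_path, else search PATH.
|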
| + if os.path.exists(options.gsutil_path):
|
| + gsutil = Gsutil(options.gsutil_path)
|
| +  else:
|
| +    gsutil = None
|
| +    for path in os.environ['PATH'].split(os.pathsep):
|
| +      if os.path.exists(path) and 'gsutil' in os.listdir(path):
|
| +        gsutil = Gsutil(os.path.join(path, 'gsutil'))
|
| +        break
|
| +    if not gsutil:
|
| +      print >>sys.stderr, 'gsutil not found, please specify --gsutil_path.'
|
| +      return 1
|
| +
|
| + # Check if we have permissions.
|
| +  code = gsutil.check_call('ls', base_url)
|
| + if code == 403:
|
| +    # Run config interactively so the user can enter credentials.
|
| +    code = gsutil.call('config')
|
| + if code != 0:
|
| + print >>sys.stderr, 'Error while authenticating to %s, exiting' % base_url
|
| + return 403
|
| + elif code == 404:
|
| + print >>sys.stderr, '%s not found.' % base_url
|
| + return 404
|
| + elif code != 0:
|
| + print >>sys.stderr, gsutil.stderr
|
| + return code
|
| +
|
| + # Enumerate the list of file(s) we want to transfer over.
|
| + hash_queue = []
|
| + if input_filename == '-':
|
| +    # Take stdin as a newline-separated list of files.
|
| + for line in sys.stdin.readlines():
|
| + hash_queue.append(line.strip())
|
| + else:
|
| + hash_queue.append(input_filename)
|
| +
|
| +  # Hash everything in a single thread since it's faster: the bottleneck
|
| +  # is disk IO, not CPU.
|
| + upload_queue = Queue.Queue()
|
| + hash_timer = time.time()
|
| + for filename in hash_queue:
|
| + if os.path.exists('%s.sha1' % filename) and options.skip_hashing:
|
| + print 'Found hash for %s, skipping.' % filename
|
| +      upload_queue.put((filename, open('%s.sha1' % filename).read().strip()))
|
| + continue
|
| + print 'Calculating hash for %s...' % filename,
|
| + sha1_calculator = hashlib.sha1()
|
| +    sha1_calculator.update(open(filename, 'rb').read())
|
| + sha1_sum = sha1_calculator.hexdigest()
|
| + with open(filename + '.sha1', 'w') as f:
|
| + f.write(sha1_sum)
|
| + print 'done'
|
| + upload_queue.put((filename, sha1_sum))
|
| + hash_time = time.time() - hash_timer
|
| +
|
| + # Start up all the worker threads.
|
| + all_threads = []
|
| +
|
| + # We only want one MD5 calculation happening at a time.
|
| + md5_lock = threading.Lock()
|
| + upload_timer = time.time()
|
| +
|
| + for thread_num in range(options.num_threads):
|
| +    t = threading.Thread(target=_upload_worker,
|
| +                         args=[thread_num, upload_queue, base_url, gsutil,
|
| +                               options, md5_lock])
|
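| +    # Daemon threads die with the main process, so ctrl-c during the sleep
|
| +    # loop below won't hang on in-flight uploads.
|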
| + t.daemon = True
|
| + t.start()
|
| + all_threads.append(t)
|
| +
|
| + def _wait_thread(threads, done):
|
| + for t in threads:
|
| + t.join()
|
| +    print 'All uploads complete.'
|
| + done.set()
|
| +
|
| + # Have a thread set a flag when all the tasks are done.
|
| + done = threading.Event()
|
| + done_thread = threading.Thread(target=_wait_thread, args=[all_threads, done])
|
| + done_thread.daemon = True
|
| + done_thread.start()
|
| +
|
| + while not done.is_set():
|
| +    time.sleep(1)  # Poll in a sleep loop so the user can ctrl-c out anytime.
|
| +  print 'Hashing %d files took %.1f seconds' % (len(hash_queue), hash_time)
|
| +  print 'Uploading took %.1f seconds' % (time.time() - upload_timer)
|
| +
|
| +if __name__ == '__main__':
|
| + sys.exit(main(sys.argv))
|