Chromium Code Reviews
chromiumcodereview-hr@appspot.gserviceaccount.com (chromiumcodereview-hr) | Please choose your nickname with Settings | Help | Chromium Project | Gerrit Changes | Sign out
(134)

Unified Diff: tools/clang/scripts/update.py

Issue 1693363002: add a mechanism to define clang tarball mirrors (Closed) Base URL: https://chromium.googlesource.com/chromium/src.git@master
Patch Set: Created 4 years, 10 months ago
Use n/p to move between diff chunks; N/P to move between comments. Draft comments are only viewable by you.
Jump to:
View side-by-side diff with in-line comments
Download patch
« no previous file with comments | « no previous file | no next file » | no next file with comments »
Expand Comments ('e') | Collapse Comments ('c') | Show Comments Hide Comments ('s')
Index: tools/clang/scripts/update.py
diff --git a/tools/clang/scripts/update.py b/tools/clang/scripts/update.py
index 322359f69596d13d4adafdf12b401beb32fa1e1b..354fee8cdd5a5900763b96f2df52916e8a2d5149 100755
--- a/tools/clang/scripts/update.py
+++ b/tools/clang/scripts/update.py
@@ -76,47 +76,84 @@ LLVM_REPO_URL='https://llvm.org/svn/llvm-project'
if 'LLVM_REPO_URL' in os.environ:
LLVM_REPO_URL = os.environ['LLVM_REPO_URL']
# MIRROR_MAP is an optional environment variable, which contains zero or more
# mappings from a URL prefix to one or more mirrors.  Each mapping starts
# with the original URL prefix, followed by a pipe character and a mirror
# prefix; additional mirrors are separated by further pipe characters.
# Multiple such mappings are separated by whitespace.
# eg: MIRROR_MAP="http://orig1/foo|http://mirror1/foo/"
MIRROR_MAP = {}
for _group in os.environ.get('MIRROR_MAP', '').split():
  _parts = _group.split('|')
  # Key: original URL prefix.  Value: list of mirror prefixes (may be empty).
  MIRROR_MAP[_parts[0]] = _parts[1:]
+
def UrlMirrors(orig_url, mirror_map=None):
  """Return a list of mirror URLs to try (if any), followed by the
  original URL.

  Args:
    orig_url: the canonical URL to download.
    mirror_map: optional dict mapping a URL prefix to a list of mirror
        prefixes; defaults to the module-level MIRROR_MAP parsed from the
        environment.  Exposed as a parameter so the rewrite logic can be
        tested without touching os.environ.

  Returns:
    A list of candidate URLs: every applicable mirror rewrite of orig_url
    (first matching-prefix occurrence replaced), with orig_url itself as
    the final fallback.
  """
  if mirror_map is None:
    mirror_map = MIRROR_MAP
  url_list = []
  for url_prefix in mirror_map:
    if orig_url.startswith(url_prefix):
      for mirror_prefix in mirror_map[url_prefix]:
        # Only the leading occurrence of the prefix is rewritten.
        url_list.append(orig_url.replace(url_prefix, mirror_prefix, 1))
  return url_list + [orig_url]
+
def DownloadUrl(url, output_file):
- """Download url into output_file."""
+ """Download url into output_file, or throw an exception."""
CHUNK_SIZE = 4096
TOTAL_DOTS = 10
- num_retries = 3
- retry_wait_s = 5 # Doubled at each retry.
+ sys.stdout.write('Downloading %s ' % url)
+ sys.stdout.flush()
+ response = urllib2.urlopen(url)
+ total_size = int(response.info().getheader('Content-Length').strip())
+ bytes_done = 0
+ dots_printed = 0
while True:
- try:
- sys.stdout.write('Downloading %s ' % url)
- sys.stdout.flush()
- response = urllib2.urlopen(url)
- total_size = int(response.info().getheader('Content-Length').strip())
- bytes_done = 0
- dots_printed = 0
- while True:
- chunk = response.read(CHUNK_SIZE)
- if not chunk:
- break
- output_file.write(chunk)
- bytes_done += len(chunk)
- num_dots = TOTAL_DOTS * bytes_done / total_size
- sys.stdout.write('.' * (num_dots - dots_printed))
- sys.stdout.flush()
- dots_printed = num_dots
- if bytes_done != total_size:
- raise urllib2.URLError("only got %d of %d bytes" %
- (bytes_done, total_size))
- print ' Done.'
- return
- except urllib2.URLError as e:
- sys.stdout.write('\n')
- print e
- if num_retries == 0 or isinstance(e, urllib2.HTTPError) and e.code == 404:
- raise e
- num_retries -= 1
+ chunk = response.read(CHUNK_SIZE)
+ if not chunk:
+ break
+ output_file.write(chunk)
+ bytes_done += len(chunk)
+ num_dots = TOTAL_DOTS * bytes_done / total_size
+ sys.stdout.write('.' * (num_dots - dots_printed))
+ sys.stdout.flush()
+ dots_printed = num_dots
+ if bytes_done != total_size:
+ raise urllib2.URLError("only got %d of %d bytes" %
+ (bytes_done, total_size))
+ print ' Done.'
+
+
+def DownloadMirroredUrl(orig_url, output_file):
+ """Download orig_url (or a mirror of it) into output_file."""
+ url_list = UrlMirrors(orig_url)
+ num_retries = 3 # Try each url in url_list this many times.
+ retry_wait_s = 5 # Doubled at each retry.
+
+ for loop_num in range(num_retries):
+
+ if loop_num > 0 and len(url_list) > 0:
print 'Retrying in %d s ...' % retry_wait_s
time.sleep(retry_wait_s)
retry_wait_s *= 2
+ # Loop over a copy of url_list since it may be modified:
+ for url in list(url_list):
+ try:
+ DownloadUrl(url, output_file)
+ return
+ except urllib2.URLError as e:
+ sys.stdout.write('\n')
+ print e
+
+ if isinstance(e, urllib2.HTTPError) and e.code == 404:
+ # Don't bother trying this URL again.
+ url_list.remove(url)
+
+ raise urllib2.URLError("Failed to download %s" % orig_url)
+
def EnsureDirExists(path):
if not os.path.exists(path):
@@ -126,7 +163,7 @@ def EnsureDirExists(path):
def DownloadAndUnpack(url, output_dir):
with tempfile.TemporaryFile() as f:
- DownloadUrl(url, f)
+ DownloadMirroredUrl(url, f)
f.seek(0)
EnsureDirExists(output_dir)
if url.endswith('.zip'):
« no previous file with comments | « no previous file | no next file » | no next file with comments »

Powered by Google App Engine
This is Rietveld 408576698