Chromium Code Reviews| Index: appengine/isolate/handlers_frontend.py |
| diff --git a/appengine/isolate/handlers_frontend.py b/appengine/isolate/handlers_frontend.py |
| index 531dc7ae2b14d52565f38712f278f9dee4c6f2a1..715efa572f03024aa640ed61154fb78f1c14ccf6 100644 |
| --- a/appengine/isolate/handlers_frontend.py |
| +++ b/appengine/isolate/handlers_frontend.py |
| @@ -8,6 +8,7 @@ import cgi |
| import datetime |
| import json |
| import logging |
| +import os |
| import re |
| import webapp2 |
| @@ -191,9 +192,11 @@ class BrowseHandler(auth.AuthenticatingHandler): |
| namespace = self.request.get('namespace', 'default-gzip') |
| # Support 'hash' for compatibility with old links. To remove eventually. |
| digest = self.request.get('digest', '') or self.request.get('hash', '') |
| + save_as = self.request.get('as', '') |
| params = { |
| u'digest': unicode(digest), |
| u'namespace': unicode(namespace), |
| + u'as': unicode(save_as), |
|
M-A Ruel
2017/02/14 21:01:16
keep keys sorted
jonesmi
2017/02/14 23:01:18
Done.
|
| } |
| # Check for existence of element, so we can 400/404 |
| if digest and namespace: |
| @@ -258,33 +261,52 @@ class ContentHandler(auth.AuthenticatingHandler): |
| ' python isolateserver.py download -I %s --namespace %s -f %s %s' |
| % (sizeInMib, host, namespace, digest, digest)) |
| else: |
| - self.response.headers['Content-Disposition'] = str('filename=%s' |
| - % digest) |
| +          self.response.headers['Content-Disposition'] = str( |
| +              'filename=%s' % (self.request.get('as') or digest)) |
| if content.startswith('{'): |
| # Try to format as JSON. |
| - try: |
| - content = json.dumps( |
| - json.loads(content), sort_keys=True, indent=2, |
| - separators=(',', ': ')) |
| - content = cgi.escape(content) |
| - # If we don't wrap this in html, browsers will put content in a pre |
| - # tag which is also styled with monospace/pre-wrap. We can't use |
| - # anchor tags in <pre>, so we force it to be a <div>, which happily |
| - # accepts links. |
| - content = ( |
| - '<div style="font-family:monospace;white-space:pre-wrap;">%s' |
| - '</div>' % content) |
| - # Linkify things that look like hashes |
| - content = re.sub(r'([0-9a-f]{40})', |
| - r'<a target="_blank" href="/browse?namespace=%s' % namespace + |
| - r'&digest=\1">\1</a>', |
| - content) |
| - self.response.headers['Content-Type'] = 'text/html; charset=utf-8' |
| - except ValueError: |
| - pass |
| + content = self._format_content_as_json(content, namespace) |
| + self.response.headers['Content-Type'] = 'text/html; charset=utf-8' |
| self.response.write(content) |
| + @staticmethod |
| + def _format_content_as_json(content, namespace): |
| + """Formats the content as json, and returns a string repr of the content.""" |
| + try: |
| + # Ensure we're working with HTML-safe content. Do this before adding |
| + # our own hyperlinks because cgi.escape would replace our anchor symbols. |
| + content = cgi.escape(content) |
|
M-A Ruel
2017/02/14 21:01:16
$ python -c 'import cgi,json; print json.loads(cgi
jonesmi
2017/02/14 23:01:18
Sounds good. Now doing the checks in a static meth
M-A Ruel
2017/02/14 23:46:14
There is still no guarantee that someone uploaded
jonesmi
2017/02/16 19:17:30
Ahh I see. Nice catch, thanks! We have to do cgi.e
|
| + data = json.loads(content) |
| + except ValueError: |
| + return content |
| + |
| + # Linkify all files |
| + if 'files' in data: |
|
M-A Ruel
2017/02/14 21:01:16
Let's add some more checks.
Ref: https://github.co
jonesmi
2017/02/14 23:01:18
Done, although it seems superfluous to check actua
|
| + hyperlinked_files = {} |
| + for filepath, metadata in data['files'].iteritems(): |
| + if 'h' in metadata: # Only linkify files that have a digest property |
| + save_as = os.path.basename(filepath) |
| + anchor = (r'<a target="_blank" ' |
|
M-A Ruel
2017/02/14 21:01:16
remove the quotes
jonesmi
2017/02/14 23:01:18
Done.
|
| + r'href="/browse?namespace=%s&digest=%s&as=%s">' |
|
M-A Ruel
2017/02/14 21:01:16
remove the quotes, and use urllib.quote(save_as) s
jonesmi
2017/02/14 23:01:18
Done.
|
| + r'%s</a>') % (namespace, metadata['h'], save_as, filepath) |
| + hyperlinked_files[anchor] = metadata |
| + data['files'] = hyperlinked_files |
| + |
| + content = json.dumps(data, sort_keys=True, indent=2, separators=(',', ': ')) |
| + # The string representation from json.dumps force-escapes all quotes, |
|
M-A Ruel
2017/02/14 21:01:16
This is not needed anymore, as there's no quotes w
jonesmi
2017/02/14 23:01:18
Done.
|
| + # which would break our href strings. Replace these with regular quotes. |
| + content = content.replace(r'\"', '"') |
| + |
| + # If we don't wrap this in html, browsers will put content in a pre |
| + # tag which is also styled with monospace/pre-wrap. We can't use |
| + # anchor tags in <pre>, so we force it to be a <div>, which happily |
| + # accepts links. |
| + content = ( |
| + '<div style="font-family:monospace;white-space:pre-wrap;">%s' |
| + '</div>' % content) |
| + return content |
| + |
| class StatsHandler(webapp2.RequestHandler): |
| """Returns the statistics web page.""" |