Chromium Code Reviews

| Index: bench/bench_graph_svg.py |
| =================================================================== |
| --- bench/bench_graph_svg.py (revision 7965) |
| +++ bench/bench_graph_svg.py (working copy) |
| @@ -3,12 +3,16 @@ |
| @author: bungeman |
| ''' |
| -import sys |
| +import bench_util |
| import getopt |
| +import httplib |
| +import itertools |
| +import json |
| +import os |
| import re |
| -import os |
| -import bench_util |
| -import json |
| +import sys |
| +import urllib |
| +import urllib2 |
| import xml.sax.saxutils |
| # We throw out any measurement outside this range, and log a warning. |
| @@ -19,9 +23,23 @@ |
| TITLE_PREAMBLE = 'Bench_Performance_for_Skia_' |
| TITLE_PREAMBLE_LENGTH = len(TITLE_PREAMBLE) |
| +# Number of data points to send to appengine at once. |
| +DATA_POINT_BATCHSIZE = 100 |
| + |
| +def grouper(n, iterable): |
| + """Groups list into list of lists for a given size. See itertools doc: |
| + http://docs.python.org/2/library/itertools.html#module-itertools |
| + """ |
| + args = [iter(iterable)] * n |
| + return [[n for n in t if n] for t in itertools.izip_longest(*args)] |
| + |
| + |
| def usage(): |
| """Prints simple usage information.""" |
| - |
| + |
| + print '-a <url> the url to use for adding bench values to app engine app.' |
| + print ' Example: "https://skiadash.appspot.com/add_point".' |
| + print ' If not set, will skip this step.' |
| print '-b <bench> the bench to show.' |
| print '-c <config> the config to show (GPU, 8888, 565, etc).' |
| print '-d <dir> a directory containing bench_r<revision>_<scalar> files.' |
| @@ -286,7 +304,7 @@ |
| try: |
| opts, _ = getopt.getopt(sys.argv[1:] |
| - , "b:c:d:e:f:i:l:m:o:r:s:t:x:y:" |
| + , "a:b:c:d:e:f:i:l:m:o:r:s:t:x:y:" |
| , "default-setting=") |
| except getopt.GetoptError, err: |
| print str(err) |
| @@ -299,6 +317,7 @@ |
| time_of_interest = None |
| time_to_ignore = None |
| bench_expectations = {} |
| + appengine_url = None # used for adding data to appengine datastore |
| rep = None # bench representation algorithm |
| revision_range = '0:' |
| regression_range = '0:' |
| @@ -370,9 +389,47 @@ |
| raise Exception('Bench values out of range:\n' + |
| '\n'.join(exceptions)) |
| + def write_to_appengine(lines, url, newest_revision, bot): |
| + """Writes latest bench values to appengine datastore. |
| + lines: dictionary from create_lines. {Label:[(x,y)] | x[n] <= x[n+1]} |
|

epoger
2013/03/06 17:17:22
I don't understand the notation at the end of this line.

benchen
2013/03/06 18:52:37
Just copied it from create_lines. Shall I remove it?

|
| + url: the appengine url used to send bench values to write |
| + newest_revision: the latest revision that this script reads |
| + bot: the bot platform the bench is run on |
| + """ |
| + data = [] |
| + for line in lines: |
|

epoger
2013/03/06 17:17:22
Please rename the "lines" param to something more descriptive of its contents.

benchen
2013/03/06 18:52:37
Done.

|
| + line_str = str(line)[ : str(line).find('_{')] |
|

epoger
2013/03/06 17:17:22
Why are we stringifying the Label object in "line"?

benchen
2013/03/06 18:52:37
You're right. I was still doing the "xhtml parsing" approach.

|
| + if line_str.find('.skp') < 0 or not line_str.endswith('_'): |
|

epoger
2013/03/06 17:17:22
If you're trying to filter the bench name, use the Label object's fields instead of string matching.

benchen
2013/03/06 18:52:37
Done.

|
| + # filter out non-picture and non-walltime benches |
| + continue |
| + bench, config = line_str.split('.skp', 1) |
| + config = config.strip('_') # remove leading and trailing '_' |
| + rev, val = lines[line][-1] |
| + if rev != newest_revision: |
| + continue |
| + data.append({'master': 'Skia', 'bot': bot, |
| + 'test': config + '/' + bench, |
| + 'revision': rev, 'value': val, 'error': 0}) |
| + for curr_data in grouper(DATA_POINT_BATCHSIZE, data): |
| + req = urllib2.Request(appengine_url, |
| + urllib.urlencode({'data': json.dumps(curr_data)})) |
| + try: |
| + urllib2.urlopen(req) |
| + except urllib2.HTTPError, e: |
| + sys.stderr.write("HTTPError for JSON data %s: %s\n" % ( |
| + data, e)) |
| + except urllib2.URLError, e: |
| + sys.stderr.write("URLError for JSON data %s: %s\n" % ( |
| + data, e)) |
| + except httplib.HTTPException, e: |
| + sys.stderr.write("HTTPException for JSON data %s: %s\n" % ( |
| + data, e)) |
| + |
| try: |
| for option, value in opts: |
| - if option == "-b": |
| + if option == "-a": |
| + appengine_url = value |
| + elif option == "-b": |
| bench_of_interest = value |
| elif option == "-c": |
| config_of_interest = value |
| @@ -421,10 +478,11 @@ |
| # for use in platform_and_alg to track matching benches later. If title flag |
| # is not in this format, there may be no matching benches in the file |
| # provided by the expectation_file flag (-e). |
| + bot = title # To store the platform as bot name |
| platform_and_alg = title |
| if platform_and_alg.startswith(TITLE_PREAMBLE): |
| - platform_and_alg = ( |
| - platform_and_alg[TITLE_PREAMBLE_LENGTH:] + '-' + rep) |
| + bot = platform_and_alg[TITLE_PREAMBLE_LENGTH:] |
| + platform_and_alg = bot + '-' + rep |
| title += ' [representation: %s]' % rep |
| latest_revision = get_latest_revision(directory) |
| @@ -461,6 +519,9 @@ |
| output_xhtml(lines, oldest_revision, newest_revision, ignored_revision_data_points, |
| regressions, requested_width, requested_height, title) |
| + if appengine_url: |
| + write_to_appengine(lines, appengine_url, newest_revision, bot) |
| + |
| check_expectations(lines, bench_expectations, newest_revision, |
| platform_and_alg) |