Chromium Code Reviews| Index: bench/bench_graph_svg.py |
| =================================================================== |
| --- bench/bench_graph_svg.py (revision 7965) |
| +++ bench/bench_graph_svg.py (working copy) |
| @@ -3,12 +3,16 @@ |
| @author: bungeman |
| ''' |
| -import sys |
| +import bench_util |
| import getopt |
| +import httplib |
| +import itertools |
| +import json |
| +import os |
| import re |
| -import os |
| -import bench_util |
| -import json |
| +import sys |
| +import urllib |
| +import urllib2 |
| import xml.sax.saxutils |
| # We throw out any measurement outside this range, and log a warning. |
| @@ -19,9 +23,23 @@ |
| TITLE_PREAMBLE = 'Bench_Performance_for_Skia_' |
| TITLE_PREAMBLE_LENGTH = len(TITLE_PREAMBLE) |
| +# Number of data points to send to appengine at once. |
| +DATA_POINT_BATCHSIZE = 100 |
| + |
| +def grouper(n, iterable): |
| + """Groups list into list of lists for a given size. See itertools doc: |
| + http://docs.python.org/2/library/itertools.html#module-itertools |
| + """ |
| + args = [iter(iterable)] * n |
| + return [[n for n in t if n] for t in itertools.izip_longest(*args)] |
| + |
| + |
| def usage(): |
| """Prints simple usage information.""" |
| - |
| + |
| + print '-a <url> the url to use for adding bench values to app engine app.' |
| + print ' Example: "https://skiadash.appspot.com/add_point".' |
| + print ' If not set, will skip this step.' |
| print '-b <bench> the bench to show.' |
| print '-c <config> the config to show (GPU, 8888, 565, etc).' |
| print '-d <dir> a directory containing bench_r<revision>_<scalar> files.' |
| @@ -192,7 +210,20 @@ |
| """Convert revision data into a dictionary of line data. |
| ({int:[BenchDataPoints]}, {str:str}, str?, str?, str?) |
| - -> {Label:[(x,y)] | [n].x <= [n+1].x}""" |
| + -> {Label:[(x,y)] | [n].x <= [n+1].x} |
| + INPUT parameters of the function are: |
| + 1. a dictionary with integer keys (revision #) and a list of bench data |
| + points as values |
| + 2. a dictionary of setting names to value |
| + 3-5. optional filter parameters: which bench, config, or timer type is of |
| + interest. For each one, if None, process them all. |
| + 6. optional timer type to ignore |
| + |
| + OUTPUT is a dictionary of this form: |
| + keys = Label objects |
| + values = a list of (x, y) tuples sorted such that x values increase |
| + monotonically |
| + """ |
| revisions = revision_data_points.keys() |
| revisions.sort() |
| lines = {} # {Label:[(x,y)] | x[n] <= x[n+1]} |
| @@ -286,7 +317,7 @@ |
| try: |
| opts, _ = getopt.getopt(sys.argv[1:] |
| - , "b:c:d:e:f:i:l:m:o:r:s:t:x:y:" |
| + , "a:b:c:d:e:f:i:l:m:o:r:s:t:x:y:" |
| , "default-setting=") |
| except getopt.GetoptError, err: |
| print str(err) |
| @@ -299,6 +330,7 @@ |
| time_of_interest = None |
| time_to_ignore = None |
| bench_expectations = {} |
| + appengine_url = None # used for adding data to appengine datastore |
| rep = None # bench representation algorithm |
| revision_range = '0:' |
| regression_range = '0:' |
| @@ -370,9 +402,45 @@ |
| raise Exception('Bench values out of range:\n' + |
| '\n'.join(exceptions)) |
| + def write_to_appengine(line_data_dict, url, newest_revision, bot): |
| + """Writes latest bench values to appengine datastore. |
| + line_data_dict: dictionary from create_lines. |
| + url: the appengine url used to send bench values to write |
| + newest_revision: the latest revision that this script reads |
| + bot: the bot platform the bench is run on |
| + """ |
| + data = [] |
| + for label in line_data_dict: |
|
epoger
2013/03/07 16:56:46
please change this to:
for label in line_data_dict
benchen
2013/03/07 17:13:27
Done.
|
| + if not label.bench.endswith('.skp') or label.time_type: |
| + # filter out non-picture and non-walltime benches |
| + continue |
| + config = label.config |
| + rev, val = line_data_dict[label][-1] |
|
epoger
2013/03/07 16:56:46
So I guess the assumption here is that newest_revision
benchen
2013/03/07 17:13:27
Done.
|
| + if rev != newest_revision: |
| + continue |
| + data.append({'master': 'Skia', 'bot': bot, |
| + 'test': config + '/' + label.bench.replace('.skp', ''), |
| + 'revision': rev, 'value': val, 'error': 0}) |
| + for curr_data in grouper(DATA_POINT_BATCHSIZE, data): |
| + req = urllib2.Request(appengine_url, |
| + urllib.urlencode({'data': json.dumps(curr_data)})) |
| + try: |
| + urllib2.urlopen(req) |
| + except urllib2.HTTPError, e: |
| + sys.stderr.write("HTTPError for JSON data %s: %s\n" % ( |
| + data, e)) |
| + except urllib2.URLError, e: |
| + sys.stderr.write("URLError for JSON data %s: %s\n" % ( |
| + data, e)) |
| + except httplib.HTTPException, e: |
| + sys.stderr.write("HTTPException for JSON data %s: %s\n" % ( |
| + data, e)) |
| + |
| try: |
| for option, value in opts: |
| - if option == "-b": |
| + if option == "-a": |
| + appengine_url = value |
| + elif option == "-b": |
| bench_of_interest = value |
| elif option == "-c": |
| config_of_interest = value |
| @@ -421,10 +489,11 @@ |
| # for use in platform_and_alg to track matching benches later. If title flag |
| # is not in this format, there may be no matching benches in the file |
| # provided by the expectation_file flag (-e). |
| + bot = title # To store the platform as bot name |
| platform_and_alg = title |
| if platform_and_alg.startswith(TITLE_PREAMBLE): |
| - platform_and_alg = ( |
| - platform_and_alg[TITLE_PREAMBLE_LENGTH:] + '-' + rep) |
| + bot = platform_and_alg[TITLE_PREAMBLE_LENGTH:] |
| + platform_and_alg = bot + '-' + rep |
| title += ' [representation: %s]' % rep |
| latest_revision = get_latest_revision(directory) |
| @@ -461,6 +530,9 @@ |
| output_xhtml(lines, oldest_revision, newest_revision, ignored_revision_data_points, |
| regressions, requested_width, requested_height, title) |
| + if appengine_url: |
| + write_to_appengine(lines, appengine_url, newest_revision, bot) |
| + |
| check_expectations(lines, bench_expectations, newest_revision, |
| platform_and_alg) |