Chromium Code Reviews| OLD | NEW |
|---|---|
| 1 ''' | 1 ''' |
| 2 Created on May 16, 2011 | 2 Created on May 16, 2011 |
| 3 | 3 |
| 4 @author: bungeman | 4 @author: bungeman |
| 5 ''' | 5 ''' |
| 6 import bench_util | |
| 7 import getopt | |
| 8 import httplib | |
| 9 import itertools | |
| 10 import json | |
| 11 import os | |
| 12 import re | |
| 6 import sys | 13 import sys |
| 7 import getopt | 14 import urllib |
| 8 import re | 15 import urllib2 |
| 9 import os | |
| 10 import bench_util | |
| 11 import json | |
| 12 import xml.sax.saxutils | 16 import xml.sax.saxutils |
| 13 | 17 |
# We throw out any measurement outside this range, and log a warning.
MIN_REASONABLE_TIME = 0
MAX_REASONABLE_TIME = 99999

# Constants for prefixes in output title used in buildbot.  The platform name
# is extracted from titles of the form TITLE_PREAMBLE + <platform>.
TITLE_PREAMBLE = 'Bench_Performance_for_Skia_'
TITLE_PREAMBLE_LENGTH = len(TITLE_PREAMBLE)

# Number of data points to send to appengine at once, to keep each HTTP
# request to the dashboard reasonably small.
DATA_POINT_BATCHSIZE = 100
def grouper(n, iterable):
    """Breaks an iterable into a list of lists, each of length at most n.

    The last sublist holds the remainder when len(iterable) is not a
    multiple of n.  See the itertools recipes:
    http://docs.python.org/2/library/itertools.html#module-itertools

    n: maximum size of each group.
    iterable: the items to group; items must not themselves be None,
        since None is used as the padding sentinel.
    Returns: a list of lists.
    """
    # Resolve the py2 name with a py3 fallback so the helper keeps working
    # either way; behavior on Python 2 is unchanged.
    try:
        izip_longest = itertools.izip_longest  # Python 2
    except AttributeError:
        izip_longest = itertools.zip_longest  # Python 3
    args = [iter(iterable)] * n
    # izip_longest pads the final group with None; strip only that padding.
    # (Bug fix: the previous truthiness filter `if n` both shadowed the
    # parameter n and silently dropped legitimate falsy items such as 0.)
    return [[item for item in group if item is not None]
            for group in izip_longest(*args)]
| 35 | |
| 36 | |
| 22 def usage(): | 37 def usage(): |
| 23 """Prints simple usage information.""" | 38 """Prints simple usage information.""" |
| 24 | 39 |
| 40 print '-a <url> the url to use for adding bench values to app engine app.' | |
| 41 print ' Example: "https://skiadash.appspot.com/add_point".' | |
| 42 print ' If not set, will skip this step.' | |
| 25 print '-b <bench> the bench to show.' | 43 print '-b <bench> the bench to show.' |
| 26 print '-c <config> the config to show (GPU, 8888, 565, etc).' | 44 print '-c <config> the config to show (GPU, 8888, 565, etc).' |
| 27 print '-d <dir> a directory containing bench_r<revision>_<scalar> files.' | 45 print '-d <dir> a directory containing bench_r<revision>_<scalar> files.' |
| 28 print '-e <file> file containing expected bench values/ranges.' | 46 print '-e <file> file containing expected bench values/ranges.' |
| 29 print ' Will raise exception if actual bench values are out of range.' | 47 print ' Will raise exception if actual bench values are out of range.' |
| 30 print ' See bench_expectations.txt for data format and examples.' | 48 print ' See bench_expectations.txt for data format and examples.' |
| 31 print '-f <revision>[:<revision>] the revisions to use for fitting.' | 49 print '-f <revision>[:<revision>] the revisions to use for fitting.' |
| 32 print ' Negative <revision> is taken as offset from most recent revision.' | 50 print ' Negative <revision> is taken as offset from most recent revision.' |
| 33 print '-i <time> the time to ignore (w, c, g, etc).' | 51 print '-i <time> the time to ignore (w, c, g, etc).' |
| 34 print ' The flag is ignored when -t is set; otherwise we plot all the' | 52 print ' The flag is ignored when -t is set; otherwise we plot all the' |
| (...skipping 244 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... | |
| 279 max_up_slope = max(max_up_slope, min_slope) | 297 max_up_slope = max(max_up_slope, min_slope) |
| 280 min_down_slope = min(min_down_slope, min_slope) | 298 min_down_slope = min(min_down_slope, min_slope) |
| 281 | 299 |
| 282 return (max_up_slope, min_down_slope) | 300 return (max_up_slope, min_down_slope) |
| 283 | 301 |
| 284 def main(): | 302 def main(): |
| 285 """Parses command line and writes output.""" | 303 """Parses command line and writes output.""" |
| 286 | 304 |
| 287 try: | 305 try: |
| 288 opts, _ = getopt.getopt(sys.argv[1:] | 306 opts, _ = getopt.getopt(sys.argv[1:] |
| 289 , "b:c:d:e:f:i:l:m:o:r:s:t:x:y:" | 307 , "a:b:c:d:e:f:i:l:m:o:r:s:t:x:y:" |
| 290 , "default-setting=") | 308 , "default-setting=") |
| 291 except getopt.GetoptError, err: | 309 except getopt.GetoptError, err: |
| 292 print str(err) | 310 print str(err) |
| 293 usage() | 311 usage() |
| 294 sys.exit(2) | 312 sys.exit(2) |
| 295 | 313 |
| 296 directory = None | 314 directory = None |
| 297 config_of_interest = None | 315 config_of_interest = None |
| 298 bench_of_interest = None | 316 bench_of_interest = None |
| 299 time_of_interest = None | 317 time_of_interest = None |
| 300 time_to_ignore = None | 318 time_to_ignore = None |
| 301 bench_expectations = {} | 319 bench_expectations = {} |
| 320 appengine_url = None # used for adding data to appengine datastore | |
| 302 rep = None # bench representation algorithm | 321 rep = None # bench representation algorithm |
| 303 revision_range = '0:' | 322 revision_range = '0:' |
| 304 regression_range = '0:' | 323 regression_range = '0:' |
| 305 latest_revision = None | 324 latest_revision = None |
| 306 requested_height = None | 325 requested_height = None |
| 307 requested_width = None | 326 requested_width = None |
| 308 title = 'Bench graph' | 327 title = 'Bench graph' |
| 309 settings = {} | 328 settings = {} |
| 310 default_settings = {} | 329 default_settings = {} |
| 311 | 330 |
| (...skipping 51 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... | |
| 363 # Skip benches without value for latest revision. | 382 # Skip benches without value for latest revision. |
| 364 continue | 383 continue |
| 365 this_min, this_max = expectations[bench_platform_key] | 384 this_min, this_max = expectations[bench_platform_key] |
| 366 if this_bench_value < this_min or this_bench_value > this_max: | 385 if this_bench_value < this_min or this_bench_value > this_max: |
| 367 exceptions.append('Bench %s value %s out of range [%s, %s].' % | 386 exceptions.append('Bench %s value %s out of range [%s, %s].' % |
| 368 (bench_platform_key, this_bench_value, this_min, this_max)) | 387 (bench_platform_key, this_bench_value, this_min, this_max)) |
| 369 if exceptions: | 388 if exceptions: |
| 370 raise Exception('Bench values out of range:\n' + | 389 raise Exception('Bench values out of range:\n' + |
| 371 '\n'.join(exceptions)) | 390 '\n'.join(exceptions)) |
| 372 | 391 |
| 392 def write_to_appengine(lines, url, newest_revision, bot): | |
| 393 """Writes latest bench values to appengine datastore. | |
| 394 lines: dictionary from create_lines. {Label:[(x,y)] | x[n] <= x[n+1]} | |
|
epoger
2013/03/06 17:17:22
I don't understand the notation at the end of this
benchen
2013/03/06 18:52:37
Just copied it from create_lines. Shall I remove i
| |
| 395 url: the appengine url used to send bench values to write | |
| 396 newest_revision: the latest revision that this script reads | |
| 397 bot: the bot platform the bench is run on | |
| 398 """ | |
| 399 data = [] | |
| 400 for line in lines: | |
|
epoger
2013/03/06 17:17:22
Please rename the "lines" param to something like
benchen
2013/03/06 18:52:37
Done.
| |
| 401 line_str = str(line)[ : str(line).find('_{')] | |
|
epoger
2013/03/06 17:17:22
Why are we stringifying the Label object in "line"
benchen
2013/03/06 18:52:37
You're right. I was still doing the "xhtml parsing
| |
| 402 if line_str.find('.skp') < 0 or not line_str.endswith('_'): | |
|
epoger
2013/03/06 17:17:22
If you're trying to filter the bench name, use lab
benchen
2013/03/06 18:52:37
Done.
| |
| 403 # filter out non-picture and non-walltime benches | |
| 404 continue | |
| 405 bench, config = line_str.split('.skp', 1) | |
| 406 config = config.strip('_') # remove leading and trailing '_' | |
| 407 rev, val = lines[line][-1] | |
| 408 if rev != newest_revision: | |
| 409 continue | |
| 410 data.append({'master': 'Skia', 'bot': bot, | |
| 411 'test': config + '/' + bench, | |
| 412 'revision': rev, 'value': val, 'error': 0}) | |
| 413 for curr_data in grouper(DATA_POINT_BATCHSIZE, data): | |
| 414 req = urllib2.Request(appengine_url, | |
| 415 urllib.urlencode({'data': json.dumps(curr_data)})) | |
| 416 try: | |
| 417 urllib2.urlopen(req) | |
| 418 except urllib2.HTTPError, e: | |
| 419 sys.stderr.write("HTTPError for JSON data %s: %s\n" % ( | |
| 420 data, e)) | |
| 421 except urllib2.URLError, e: | |
| 422 sys.stderr.write("URLError for JSON data %s: %s\n" % ( | |
| 423 data, e)) | |
| 424 except httplib.HTTPException, e: | |
| 425 sys.stderr.write("HTTPException for JSON data %s: %s\n" % ( | |
| 426 data, e)) | |
| 427 | |
| 373 try: | 428 try: |
| 374 for option, value in opts: | 429 for option, value in opts: |
| 375 if option == "-b": | 430 if option == "-a": |
| 431 appengine_url = value | |
| 432 elif option == "-b": | |
| 376 bench_of_interest = value | 433 bench_of_interest = value |
| 377 elif option == "-c": | 434 elif option == "-c": |
| 378 config_of_interest = value | 435 config_of_interest = value |
| 379 elif option == "-d": | 436 elif option == "-d": |
| 380 directory = value | 437 directory = value |
| 381 elif option == "-e": | 438 elif option == "-e": |
| 382 read_expectations(bench_expectations, value) | 439 read_expectations(bench_expectations, value) |
| 383 elif option == "-f": | 440 elif option == "-f": |
| 384 regression_range = value | 441 regression_range = value |
| 385 elif option == "-i": | 442 elif option == "-i": |
| (...skipping 28 matching lines...) Expand all Loading... | |
| 414 sys.exit(2) | 471 sys.exit(2) |
| 415 | 472 |
| 416 if time_of_interest: | 473 if time_of_interest: |
| 417 time_to_ignore = None | 474 time_to_ignore = None |
| 418 | 475 |
| 419 # The title flag (-l) provided in buildbot slave is in the format | 476 # The title flag (-l) provided in buildbot slave is in the format |
| 420 # Bench_Performance_for_Skia_<platform>, and we want to extract <platform> | 477 # Bench_Performance_for_Skia_<platform>, and we want to extract <platform> |
| 421 # for use in platform_and_alg to track matching benches later. If title flag | 478 # for use in platform_and_alg to track matching benches later. If title flag |
| 422 # is not in this format, there may be no matching benches in the file | 479 # is not in this format, there may be no matching benches in the file |
| 423 # provided by the expectation_file flag (-e). | 480 # provided by the expectation_file flag (-e). |
| 481 bot = title # To store the platform as bot name | |
| 424 platform_and_alg = title | 482 platform_and_alg = title |
| 425 if platform_and_alg.startswith(TITLE_PREAMBLE): | 483 if platform_and_alg.startswith(TITLE_PREAMBLE): |
| 426 platform_and_alg = ( | 484 bot = platform_and_alg[TITLE_PREAMBLE_LENGTH:] |
| 427 platform_and_alg[TITLE_PREAMBLE_LENGTH:] + '-' + rep) | 485 platform_and_alg = bot + '-' + rep |
| 428 title += ' [representation: %s]' % rep | 486 title += ' [representation: %s]' % rep |
| 429 | 487 |
| 430 latest_revision = get_latest_revision(directory) | 488 latest_revision = get_latest_revision(directory) |
| 431 oldest_revision, newest_revision = parse_range(revision_range) | 489 oldest_revision, newest_revision = parse_range(revision_range) |
| 432 oldest_regression, newest_regression = parse_range(regression_range) | 490 oldest_regression, newest_regression = parse_range(regression_range) |
| 433 | 491 |
| 434 unfiltered_revision_data_points = parse_dir(directory | 492 unfiltered_revision_data_points = parse_dir(directory |
| 435 , default_settings | 493 , default_settings |
| 436 , oldest_revision | 494 , oldest_revision |
| 437 , newest_revision | 495 , newest_revision |
| (...skipping 16 matching lines...) Expand all Loading... | |
| 454 , time_of_interest | 512 , time_of_interest |
| 455 , time_to_ignore) | 513 , time_to_ignore) |
| 456 | 514 |
| 457 regressions = create_regressions(lines | 515 regressions = create_regressions(lines |
| 458 , oldest_regression | 516 , oldest_regression |
| 459 , newest_regression) | 517 , newest_regression) |
| 460 | 518 |
| 461 output_xhtml(lines, oldest_revision, newest_revision, ignored_revision_data_ points, | 519 output_xhtml(lines, oldest_revision, newest_revision, ignored_revision_data_ points, |
| 462 regressions, requested_width, requested_height, title) | 520 regressions, requested_width, requested_height, title) |
| 463 | 521 |
| 522 if appengine_url: | |
| 523 write_to_appengine(lines, appengine_url, newest_revision, bot) | |
| 524 | |
| 464 check_expectations(lines, bench_expectations, newest_revision, | 525 check_expectations(lines, bench_expectations, newest_revision, |
| 465 platform_and_alg) | 526 platform_and_alg) |
| 466 | 527 |
def qa(out):
    """Convert any value to a string quoted for use as an XML attribute."""
    text = str(out)
    return xml.sax.saxutils.quoteattr(text)
def qe(out):
    """Convert any value to a string escaped for use as XML character data."""
    text = str(out)
    return xml.sax.saxutils.escape(text)
| 473 | 534 |
| (...skipping 481 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... | |
| 955 print '<a id="rev_link" xlink:href="" target="_top">' | 1016 print '<a id="rev_link" xlink:href="" target="_top">' |
| 956 print '<text id="revision" x="0" y=%s style="' % qa(font_size*2) | 1017 print '<text id="revision" x="0" y=%s style="' % qa(font_size*2) |
| 957 print 'font-size: %s; ' % qe(font_size) | 1018 print 'font-size: %s; ' % qe(font_size) |
| 958 print 'stroke: #0000dd; text-decoration: underline; ' | 1019 print 'stroke: #0000dd; text-decoration: underline; ' |
| 959 print '"> </text></a>' | 1020 print '"> </text></a>' |
| 960 | 1021 |
| 961 print '</svg>' | 1022 print '</svg>' |
| 962 | 1023 |
# Entry point: run main() only when executed as a script, so the module can
# also be imported without side effects.
if __name__ == "__main__":
    main()
| OLD | NEW |