OLD | NEW |
1 #!/usr/bin/python | 1 #!/usr/bin/python |
2 | 2 |
3 # Copyright (c) 2011, the Dart project authors. Please see the AUTHORS file | 3 # Copyright (c) 2011, the Dart project authors. Please see the AUTHORS file |
4 # for details. All rights reserved. Use of this source code is governed by a | 4 # for details. All rights reserved. Use of this source code is governed by a |
5 # BSD-style license that can be found in the LICENSE file. | 5 # BSD-style license that can be found in the LICENSE file. |
6 | 6 |
7 import datetime | 7 import datetime |
8 import math | 8 import math |
9 try: | 9 from matplotlib.font_manager import FontProperties |
10 from matplotlib.font_manager import FontProperties | 10 import matplotlib.pyplot as plt |
11 import matplotlib.pyplot as plt | |
12 except ImportError: | |
13 print 'Warning: no matplotlib. ' + \ | |
14 'Please ignore if you are running buildbot smoketests.' | |
15 import optparse | 11 import optparse |
16 import os | 12 import os |
17 from os.path import dirname, abspath | 13 from os.path import dirname, abspath |
18 import platform | 14 import platform |
19 import shutil | 15 import shutil |
20 import subprocess | 16 import subprocess |
21 import time | 17 import time |
22 import traceback | 18 import traceback |
23 import sys | 19 import sys |
24 | 20 |
(...skipping 10 matching lines...) |
35 V8_MEAN = 'V8 Mean' | 31 V8_MEAN = 'V8 Mean' |
36 FROG_MEAN = 'frog Mean' | 32 FROG_MEAN = 'frog Mean' |
37 COMMAND_LINE = 'commandline' | 33 COMMAND_LINE = 'commandline' |
38 V8 = 'v8' | 34 V8 = 'v8' |
39 FROG = 'frog' | 35 FROG = 'frog' |
40 V8_AND_FROG = [V8, FROG] | 36 V8_AND_FROG = [V8, FROG] |
41 CORRECTNESS = 'Percent passing' | 37 CORRECTNESS = 'Percent passing' |
42 COLORS = ['blue', 'green', 'red', 'cyan', 'magenta', 'black'] | 38 COLORS = ['blue', 'green', 'red', 'cyan', 'magenta', 'black'] |
43 GRAPH_OUT_DIR = 'graphs' | 39 GRAPH_OUT_DIR = 'graphs' |
44 SLEEP_TIME = 200 | 40 SLEEP_TIME = 200 |
45 PERFBOT_MODE = False | |
46 VERBOSE = False | 41 VERBOSE = False |
47 HAS_SHELL = False | 42 HAS_SHELL = False |
48 if platform.system() == 'Windows': | 43 if platform.system() == 'Windows': |
49 # On Windows, shell must be true to get the correct environment variables. | 44 # On Windows, shell must be true to get the correct environment variables. |
50 HAS_SHELL = True | 45 HAS_SHELL = True |
51 | 46 |
52 """First, some utility methods.""" | 47 """First, some utility methods.""" |
53 | 48 |
54 def run_cmd(cmd_list, outfile=None, append=False): | 49 def run_cmd(cmd_list, outfile=None, append=False): |
55 """Run the specified command and print out any output to stdout. | 50 """Run the specified command and print out any output to stdout. |
(...skipping 67 matching lines...) |
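
The body of run_cmd is elided above. As a purely hypothetical sketch (not the elided implementation), a helper with this signature typically threads HAS_SHELL into subprocess -- shell=True being what makes Windows children see the expected environment variables, per the comment earlier in the file:

  import platform
  import subprocess

  HAS_SHELL = (platform.system() == 'Windows')

  def run_cmd_sketch(cmd_list, outfile=None, append=False):
    """Run cmd_list, echo its combined output, optionally tee to outfile."""
    p = subprocess.Popen(cmd_list, stdout=subprocess.PIPE,
                         stderr=subprocess.STDOUT, shell=HAS_SHELL)
    output, _ = p.communicate()
    print output
    if outfile:
      f = open(outfile, 'a' if append else 'w')
      f.write(output)
      f.close()
    return output.splitlines()  # callers iterate the returned lines
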
123 def has_new_code(): | 118 def has_new_code(): |
124 """Tests if there are any newer versions of files on the server.""" | 119 """Tests if there are any newer versions of files on the server.""" |
125 os.chdir(DART_INSTALL_LOCATION) | 120 os.chdir(DART_INSTALL_LOCATION) |
126 results = run_cmd(['svn', 'st', '-u']) | 121 results = run_cmd(['svn', 'st', '-u']) |
127 for line in results: | 122 for line in results: |
128 if '*' in line: | 123 if '*' in line: |
129 return True | 124 return True |
130 return False | 125 return False |
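
For reference, svn st -u contacts the repository and marks any path that has a newer server-side version with a '*' column, closing with a status-revision line; the '*' in line test above keys on exactly that column. Illustrative output (path and revision numbers invented):

          *     4417   tools/testing/perf_testing/run_perf_tests.py
  Status against revision:   4420
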
131 | 126 |
132 def get_browsers(): | 127 def get_browsers(): |
133 if not PERFBOT_MODE: | 128 browsers = ['ff', 'chrome'] |
134 # Only Firefox (and Chrome, but we have Dump Render Tree) works in Linux | 129 if platform.system() == 'Darwin': |
135 return ['ff'] | 130 browsers += ['safari'] |
136 browsers = ['ff', 'chrome', 'safari'] | |
137 if platform.system() == 'Windows': | 131 if platform.system() == 'Windows': |
138 browsers += ['ie'] | 132 browsers += ['ie'] |
139 return browsers | 133 return browsers |
140 | 134 |
141 def get_versions(): | 135 def get_versions(): |
142 if not PERFBOT_MODE: | 136 return V8_AND_FROG |
143 return [FROG] | |
144 else: | |
145 return V8_AND_FROG | |
146 | 137 |
147 def get_benchmarks(): | 138 def get_benchmarks(): |
148 if not PERFBOT_MODE: | 139 return ['Mandelbrot', 'DeltaBlue', 'Richards', 'NBody', 'BinaryTrees', |
149 return ['Smoketest'] | 140 'Fannkuch', 'Meteor', 'BubbleSort', 'Fibonacci', 'Loop', 'Permute', |
150 else: | 141 'Queens', 'QuickSort', 'Recurse', 'Sieve', 'Sum', 'Tak', 'Takl', 'Towers', |
151 return ['Mandelbrot', 'DeltaBlue', 'Richards', 'NBody', 'BinaryTrees', | 142 'TreeSort'] |
152 'Fannkuch', 'Meteor', 'BubbleSort', 'Fibonacci', 'Loop', 'Permute', | |
153 'Queens', 'QuickSort', 'Recurse', 'Sieve', 'Sum', 'Tak', 'Takl', 'Towers', | |
154 'TreeSort'] | |
155 | 143 |
156 def get_os_directory(): | 144 def get_os_directory(): |
157 """Specifies the name of the directory for the testing build of dart, which | 145 """Specifies the name of the directory for the testing build of dart, which |
158 has a naming convention different from that of utils.getBuildRoot(...).""" | 146 has a naming convention different from that of utils.getBuildRoot(...).""" |
159 if platform.system() == 'Windows': | 147 if platform.system() == 'Windows': |
160 return 'windows' | 148 return 'windows' |
161 elif platform.system() == 'Darwin': | 149 elif platform.system() == 'Darwin': |
162 return 'macos' | 150 return 'macos' |
163 else: | 151 else: |
164 return 'linux' | 152 return 'linux' |
(...skipping 152 matching lines...) |
317 mean = V8_MEAN | 305 mean = V8_MEAN |
318 if frog_or_v8 == FROG: | 306 if frog_or_v8 == FROG: |
319 mean = FROG_MEAN | 307 mean = FROG_MEAN |
320 self.values_dict[platform][frog_or_v8][mean] += \ | 308 self.values_dict[platform][frog_or_v8][mean] += \ |
321 [math.pow(math.e, geo_mean / len(get_benchmarks()))] | 309 [math.pow(math.e, geo_mean / len(get_benchmarks()))] |
322 self.revision_dict[platform][frog_or_v8][mean] += [svn_revision] | 310 self.revision_dict[platform][frog_or_v8][mean] += [svn_revision] |
323 | 311 |
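
The math.pow(math.e, geo_mean / len(get_benchmarks())) step is a log-space geometric mean: assuming geo_mean accumulates math.log(score) per benchmark (the accumulation happens in lines elided from this diff), exponentiating the average log yields (x1 * ... * xn) ** (1/n) without overflowing on large products. A minimal sketch under that assumption:

  import math

  def geometric_mean(scores):
    """exp of the mean log == (x1 * ... * xn) ** (1/n), overflow-safe."""
    log_sum = sum(math.log(s) for s in scores)
    return math.pow(math.e, log_sum / len(scores))

  # geometric_mean([2.0, 8.0]) == 4.0, since exp((ln 2 + ln 8) / 2) = 4.
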
324 def run(self): | 312 def run(self): |
325 """Run the benchmarks/tests from the command line and plot the | 313 """Run the benchmarks/tests from the command line and plot the |
326 results.""" | 314 results.""" |
327 if PERFBOT_MODE: | 315 plt.cla() # cla = clear current axes |
328 plt.cla() # cla = clear current axes | |
329 os.chdir(DART_INSTALL_LOCATION) | 316 os.chdir(DART_INSTALL_LOCATION) |
330 ensure_output_directory(self.result_folder_name) | 317 ensure_output_directory(self.result_folder_name) |
331 ensure_output_directory(GRAPH_OUT_DIR) | 318 ensure_output_directory(GRAPH_OUT_DIR) |
332 self.run_tests() | 319 self.run_tests() |
333 os.chdir(os.path.join('tools', 'testing', 'perf_testing')) | 320 os.chdir(os.path.join('tools', 'testing', 'perf_testing')) |
334 | 321 |
335 # TODO(efortuna): You will want to make this only use a subset of the files | 322 # TODO(efortuna): You will want to make this only use a subset of the files |
336 # eventually. | 323 # eventually. |
337 files = os.listdir(self.result_folder_name) | 324 files = os.listdir(self.result_folder_name) |
338 | 325 |
339 for afile in files: | 326 for afile in files: |
340 if not afile.startswith('.'): | 327 if not afile.startswith('.'): |
341 self.process_file(afile) | 328 self.process_file(afile) |
342 | 329 |
343 if PERFBOT_MODE: | 330 self.plot_results('%s.png' % self.result_folder_name) |
344 self.plot_results('%s.png' % self.result_folder_name) | |
345 | 331 |
346 class PerformanceTestRunner(TestRunner): | 332 class PerformanceTestRunner(TestRunner): |
347 """Super class for all performance testing.""" | 333 """Super class for all performance testing.""" |
348 def __init__(self, result_folder_name, platform_list, platform_type): | 334 def __init__(self, result_folder_name, platform_list, platform_type): |
349 super(PerformanceTestRunner, self).__init__(result_folder_name, | 335 super(PerformanceTestRunner, self).__init__(result_folder_name, |
350 platform_list, get_versions(), get_benchmarks()) | 336 platform_list, get_versions(), get_benchmarks()) |
351 self.platform_list = platform_list | 337 self.platform_list = platform_list |
352 self.platform_type = platform_type | 338 self.platform_type = platform_type |
353 | 339 |
354 def plot_all_perf(self, png_filename): | 340 def plot_all_perf(self, png_filename): |
(...skipping 78 matching lines...) |
433 | 419 |
434 class BrowserPerformanceTestRunner(PerformanceTestRunner): | 420 class BrowserPerformanceTestRunner(PerformanceTestRunner): |
435 """Runs performance tests, in the browser.""" | 421 """Runs performance tests, in the browser.""" |
436 | 422 |
437 def __init__(self, result_folder_name): | 423 def __init__(self, result_folder_name): |
438 super(BrowserPerformanceTestRunner, self).__init__( | 424 super(BrowserPerformanceTestRunner, self).__init__( |
439 result_folder_name, get_browsers(), 'browser') | 425 result_folder_name, get_browsers(), 'browser') |
440 | 426 |
441 def run_tests(self): | 427 def run_tests(self): |
442 """Run a performance test in the browser.""" | 428 """Run a performance test in the browser.""" |
443 # For the smoke test, just run a simple test, not the actual benchmarks to | |
444 # ensure we haven't broken the Firefox DOM. | |
445 | 429 |
446 os.chdir('frog') | 430 os.chdir('frog') |
447 if PERFBOT_MODE: | 431 run_cmd(['python', os.path.join('benchmarks', 'make_web_benchmarks.py')]) |
448 run_cmd(['python', os.path.join('benchmarks', 'make_web_benchmarks.py')]) | |
449 else: | |
450 run_cmd(['./minfrog', '--out=../tools/testing/perf_testing/smoketest/' + \ | |
451 'smoketest_frog.js', '--libdir=%s/lib' % os.getcwd(), | |
452 '--compile-only', '../tools/testing/perf_testing/smoketest/' + \ | |
453 'dartWebBase.dart']) | |
454 os.chdir('..') | 432 os.chdir('..') |
455 | 433 |
456 for browser in get_browsers(): | 434 for browser in get_browsers(): |
457 for version in get_versions(): | 435 for version in get_versions(): |
458 self.trace_file = os.path.join('tools', 'testing', 'perf_testing', | 436 self.trace_file = os.path.join('tools', 'testing', 'perf_testing', |
459 self.result_folder_name, | 437 self.result_folder_name, |
460 'perf-%s-%s-%s' % (self.cur_time, browser, version)) | 438 'perf-%s-%s-%s' % (self.cur_time, browser, version)) |
461 self.add_svn_revision_to_trace(self.trace_file) | 439 self.add_svn_revision_to_trace(self.trace_file) |
462 file_path = os.path.join(os.getcwd(), 'internal', 'browserBenchmarks', | 440 file_path = os.path.join(os.getcwd(), 'internal', 'browserBenchmarks', |
463 'benchmark_page_%s.html' % version) | 441 'benchmark_page_%s.html' % version) |
464 if not PERFBOT_MODE: | |
465 file_path = os.path.join(os.getcwd(), 'tools', 'testing', | |
466 'perf_testing', 'smoketest', 'smoketest_%s.html' % version) | |
467 run_cmd(['python', os.path.join('tools', 'testing', 'run_selenium.py'), | 442 run_cmd(['python', os.path.join('tools', 'testing', 'run_selenium.py'), |
468 '--out', file_path, '--browser', browser, | 443 '--out', file_path, '--browser', browser, |
469 '--timeout', '600', '--perf'], self.trace_file, append=True) | 444 '--timeout', '600', '--perf'], self.trace_file, append=True) |
470 | 445 |
471 def process_file(self, afile): | 446 def process_file(self, afile): |
472 """Comb through the html to find the performance results.""" | 447 """Comb through the html to find the performance results.""" |
473 parts = afile.split('-') | 448 parts = afile.split('-') |
474 browser = parts[2] | 449 browser = parts[2] |
475 version = parts[3] | 450 version = parts[3] |
476 f = open(os.path.join(self.result_folder_name, afile)) | 451 f = open(os.path.join(self.result_folder_name, afile)) |
477 lines = f.readlines() | 452 lines = f.readlines() |
478 line = '' | 453 line = '' |
479 i = 0 | 454 i = 0 |
480 revision_num = 0 | 455 revision_num = 0 |
481 while '<div id="results">' not in line and i < len(lines): | 456 while '<div id="results">' not in line and i < len(lines): |
482 if 'Revision' in line: | 457 if 'Revision' in line: |
483 revision_num = int(line.split()[1]) | 458 revision_num = int(line.split()[1]) |
484 line = lines[i] | 459 line = lines[i] |
485 i += 1 | 460 i += 1 |
486 | 461 |
487 if i >= len(lines) or revision_num == 0: | 462 if i >= len(lines) or revision_num == 0: |
488 # Then this run did not complete. Ignore this tracefile. or in the case of | 463 # Then this run did not complete. Ignore this tracefile. |
489 # the smoke test, report an error. | |
490 if not PERFBOT_MODE: | |
491 print 'FAIL %s %s' % (browser, version) | |
492 os.remove(os.path.join(self.result_folder_name, afile)) | |
493 return | 464 return |
494 | 465 |
495 line = lines[i] | 466 line = lines[i] |
496 i += 1 | 467 i += 1 |
497 results = [] | 468 results = [] |
498 if line.find('<br>') > -1: | 469 if line.find('<br>') > -1: |
499 results = line.split('<br>') | 470 results = line.split('<br>') |
500 else: | 471 else: |
501 results = line.split('<br />') | 472 results = line.split('<br />') |
502 for result in results: | 473 for result in results: |
503 name_and_score = result.split(':') | 474 name_and_score = result.split(':') |
504 if len(name_and_score) < 2: | 475 if len(name_and_score) < 2: |
505 break | 476 break |
506 name = name_and_score[0].strip() | 477 name = name_and_score[0].strip() |
507 score = name_and_score[1].strip() | 478 score = name_and_score[1].strip() |
508 if version == V8: | 479 if version == V8: |
509 bench_dict = self.values_dict[browser][V8] | 480 bench_dict = self.values_dict[browser][V8] |
510 else: | 481 else: |
511 bench_dict = self.values_dict[browser][FROG] | 482 bench_dict = self.values_dict[browser][FROG] |
512 bench_dict[name] += [float(score)] | 483 bench_dict[name] += [float(score)] |
513 self.revision_dict[browser][version][name] += [revision_num] | 484 self.revision_dict[browser][version][name] += [revision_num] |
514 | 485 |
515 f.close() | 486 f.close() |
516 if not PERFBOT_MODE: | 487 self.calculate_geometric_mean(browser, version, revision_num) |
517 print 'PASS' | |
518 os.remove(os.path.join(self.result_folder_name, afile)) | |
519 else: | |
520 self.calculate_geometric_mean(browser, version, revision_num) | |
521 | 488 |
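
Working backwards from this parser, the benchmark page it expects looks roughly like the following: a 'Revision N' line anywhere before the results div, with the scores packed onto the single line after the div as colon-separated, <br>-delimited pairs (revision and scores invented for illustration):

  Revision 4417
  <div id="results">
  Richards: 132.5<br>DeltaBlue: 98.1<br>NBody: 210.4<br>
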
522 def write_html(self, delimiter, rev_nums, label_1, dict_1, label_2, dict_2, | 489 def write_html(self, delimiter, rev_nums, label_1, dict_1, label_2, dict_2, |
523 cleanFile=False): | 490 cleanFile=False): |
524 #TODO(efortuna) | 491 #TODO(efortuna) |
525 pass | 492 pass |
526 | 493 |
527 | 494 |
528 class BrowserCorrectnessTestRunner(TestRunner): | 495 class BrowserCorrectnessTestRunner(TestRunner): |
529 def __init__(self, test_type, result_folder_name): | 496 def __init__(self, test_type, result_folder_name): |
530 super(BrowserCorrectnessTestRunner, self).__init__(result_folder_name, | 497 super(BrowserCorrectnessTestRunner, self).__init__(result_folder_name, |
531 get_browsers(), [FROG], [CORRECTNESS]) | 498 get_browsers(), [FROG], [CORRECTNESS]) |
532 self.test_type = test_type | 499 self.test_type = test_type |
533 | 500 |
534 def run_tests(self): | 501 def run_tests(self): |
535 """run a test of the latest svn revision.""" | 502 """run a test of the latest svn revision.""" |
536 the_os = get_os_directory() | 503 system = get_os_directory() |
537 suffix = '' | 504 suffix = '' |
538 if platform.system() == 'Windows': | 505 if platform.system() == 'Windows': |
539 suffix = '.exe' | 506 suffix = '.exe' |
540 for browser in get_browsers(): | 507 for browser in get_browsers(): |
541 current_file = 'correctness%s-%s' % (self.cur_time, browser) | 508 current_file = 'correctness%s-%s' % (self.cur_time, browser) |
542 self.trace_file = os.path.join('tools', 'testing', | 509 self.trace_file = os.path.join('tools', 'testing', |
543 'perf_testing', self.result_folder_name, current_file) | 510 'perf_testing', self.result_folder_name, current_file) |
544 self.add_svn_revision_to_trace(self.trace_file) | 511 self.add_svn_revision_to_trace(self.trace_file) |
545 dart_sdk = os.path.join(os.getcwd(), utils.GetBuildRoot(utils.GuessOS(), | 512 dart_sdk = os.path.join(os.getcwd(), utils.GetBuildRoot(utils.GuessOS(), |
546 'release', 'ia32'), 'dart-sdk') | 513 'release', 'ia32'), 'dart-sdk') |
547 run_cmd([os.path.join('.', 'tools', 'testing', 'bin', the_os, | 514 run_cmd([os.path.join('.', 'tools', 'testing', 'bin', system, |
548 'dart' + suffix), os.path.join('tools', 'test.dart'), | 515 'dart' + suffix), os.path.join('tools', 'test.dart'), |
549 '--component=webdriver', '--flag=%s,--frog=%s,--froglib=%s' % \ | 516 '--component=webdriver', '--flag=%s,--frog=%s,--froglib=%s' % \ |
550 (browser, os.path.join(dart_sdk, 'bin', 'frogc'), | 517 (browser, os.path.join(dart_sdk, 'bin', 'frogc'), |
551 os.path.join(dart_sdk, 'lib')), '--report', | 518 os.path.join(dart_sdk, 'lib')), '--report', |
552 '--timeout=20', '--progress=color', '--mode=release', '-j1', | 519 '--timeout=20', '--progress=color', '--mode=release', '-j1', |
553 self.test_type], self.trace_file, append=True) | 520 self.test_type], self.trace_file, append=True) |
554 | 521 |
555 def process_file(self, afile): | 522 def process_file(self, afile): |
556 """Given a trace file, extract all the relevant information out of it to | 523 """Given a trace file, extract all the relevant information out of it to |
557 determine the number of correctly passing tests. | 524 determine the number of correctly passing tests. |
(...skipping 154 matching lines...) |
712 action = 'store_true', default = False) | 679 action = 'store_true', default = False) |
713 parser.add_option('--language', '-l', dest = 'language', | 680 parser.add_option('--language', '-l', dest = 'language', |
714 help = 'Run the language correctness tests', | 681 help = 'Run the language correctness tests', |
715 action = 'store_true', default = False) | 682 action = 'store_true', default = False) |
716 parser.add_option('--browser-perf', '-b', dest = 'perf', | 683 parser.add_option('--browser-perf', '-b', dest = 'perf', |
717 help = 'Run the browser performance tests', | 684 help = 'Run the browser performance tests', |
718 action = 'store_true', default = False) | 685 action = 'store_true', default = False) |
719 parser.add_option('--forever', '-f', dest = 'continuous', | 686 parser.add_option('--forever', '-f', dest = 'continuous', |
720 help = 'Run this script forever, always checking for the next svn ' | 687 help = 'Run this script forever, always checking for the next svn ' |
721 'checkin', action = 'store_true', default = False) | 688 'checkin', action = 'store_true', default = False) |
722 parser.add_option('--perfbot', '-p', dest = 'perfbot', | |
723 help = "Run in perfbot mode. (Generate plots, and keep trace files)", | |
724 action = 'store_true', default = False) | |
725 parser.add_option('--verbose', '-v', dest = 'verbose', | 689 parser.add_option('--verbose', '-v', dest = 'verbose', |
726 help = 'Print extra debug output', action = 'store_true', default = False) | 690 help = 'Print extra debug output', action = 'store_true', default = False) |
727 | 691 |
728 args, ignored = parser.parse_args() | 692 args, ignored = parser.parse_args() |
729 if not (args.cl or args.size or args.language or args.perf): | 693 if not (args.cl or args.size or args.language or args.perf): |
730 args.cl = args.size = args.language = args.perf = True | 694 args.cl = args.size = args.language = args.perf = True |
731 return (args.cl, args.size, args.language, args.perf, args.continuous, | 695 return (args.cl, args.size, args.language, args.perf, args.continuous, |
732 args.perfbot, args.verbose) | 696 args.verbose) |
733 | 697 |
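
Given the flags above (and the default of running every suite when no selection flag is passed), invocations look like the following; the script filename is hypothetical since it does not appear in this diff:

  # Only the browser performance tests, with debug output:
  python run_perf_tests.py --browser-perf --verbose

  # All four suites, re-running forever as new svn checkins land:
  python run_perf_tests.py --forever
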
734 def run_test_sequence(cl, size, language, perf): | 698 def run_test_sequence(cl, size, language, perf): |
735 if PERFBOT_MODE: | 699 # The buildbot already builds and syncs to a specific revision. Don't fight |
736 # The buildbot already builds and syncs to a specific revision. Don't fight | 700 # with it or replicate work. |
737 # with it or replicate work. | 701 if sync_and_build() == 1: |
738 if sync_and_build() == 1: | 702 return # The build is broken. |
739 return # The build is broken. | |
740 if cl: | 703 if cl: |
741 CommandLinePerformanceTestRunner('cl-results').run() | 704 CommandLinePerformanceTestRunner('cl-results').run() |
742 if size: | 705 if size: |
743 CompileTimeAndSizeTestRunner('code-time-size').run() | 706 CompileTimeAndSizeTestRunner('code-time-size').run() |
744 if language: | 707 if language: |
745 BrowserCorrectnessTestRunner('language', 'browser-correctness').run() | 708 BrowserCorrectnessTestRunner('language', 'browser-correctness').run() |
746 if perf: | 709 if perf: |
747 BrowserPerformanceTestRunner('browser-perf').run() | 710 BrowserPerformanceTestRunner('browser-perf').run() |
748 | 711 |
749 if PERFBOT_MODE: | 712 # TODO(efortuna): Temporarily disabled until you make a safe way to provide |
750 # TODO(efortuna): Temporarily disabled until you make a safe way to provide | 713 # your username/password for the uploading process. |
751 # your username/password for the uploading process. | 714 #upload_to_app_engine() |
752 #upload_to_app_engine() | |
753 pass | |
754 | 715 |
755 def main(): | 716 def main(): |
756 global PERFBOT_MODE, VERBOSE | 717 global VERBOSE |
757 (cl, size, language, perf, continuous, perfbot, verbose) = parse_args() | 718 (cl, size, language, perf, continuous, verbose) = parse_args() |
758 PERFBOT_MODE = perfbot | |
759 VERBOSE = verbose | 719 VERBOSE = verbose |
760 if continuous: | 720 if continuous: |
761 while True: | 721 while True: |
762 if has_new_code(): | 722 if has_new_code(): |
763 run_test_sequence(cl, size, language, perf) | 723 run_test_sequence(cl, size, language, perf) |
764 else: | 724 else: |
765 time.sleep(SLEEP_TIME) | 725 time.sleep(SLEEP_TIME) |
766 else: | 726 else: |
767 run_test_sequence(cl, size, language, perf) | 727 run_test_sequence(cl, size, language, perf) |
768 | 728 |
769 if __name__ == '__main__': | 729 if __name__ == '__main__': |
770 main() | 730 main() |
771 | 731 |