#!/usr/bin/python

# Copyright (c) 2011, the Dart project authors.  Please see the AUTHORS file
# for details. All rights reserved. Use of this source code is governed by a
# BSD-style license that can be found in the LICENSE file.

"""Compare the current performance statistics with the statistics 24 hours ago,
and send out an email summarizing the differences."""

import create_graph
from create_graph import BROWSER_CORRECTNESS
from create_graph import BROWSER_PERF
from create_graph import CL_PERF
from create_graph import COMMAND_LINE
from create_graph import CORRECTNESS
from create_graph import FROG
from create_graph import FROG_MEAN
from create_graph import TIME_SIZE
from create_graph import V8_AND_FROG

import get_current_stats
import pickle

def calculate_stats():
  """Compare the numbers that were available at the start of the day with the
  current numbers.

  Returns:
    A string providing an update on the latest performance numbers."""
  with open(get_current_stats.PICKLE_FILENAME, 'rb') as stats_file:
    test_runner_dict = pickle.load(stats_file)
  test_runner_dict = get_current_stats.populate_stats_dict(test_runner_dict)

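  # Sketch of the assumed layout (inferred from how the dicts are indexed
  # below, not from an external spec): each runner's values_dict maps
  # [browser][variant][metric] to a two-element list of
  # [start_of_day_value, current_value], and revision_dict mirrors that shape
  # with the corresponding revision numbers.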
  browser_perf = test_runner_dict[BROWSER_PERF]
  time_size = test_runner_dict[TIME_SIZE]
  cl = test_runner_dict[CL_PERF]
  correctness = test_runner_dict[BROWSER_CORRECTNESS]
  output = summary_stats(browser_perf, time_size, cl, correctness)
  output += specific_stats(browser_perf, time_size, cl)
  return output

def summary_stats(browser_perf, time_size, cl, correctness):
  """Return the summarized stats report.

  Args:
    browser_perf: BrowserPerformanceTestRunner object. Holds browser perf stats.
    time_size: CompileTimeSizeTestRunner object.
    cl: CommandLinePerformanceTestRunner object.
    correctness: BrowserCorrectnessTestRunner object.
  """
  output = "Summary of changes in the last 24 hours:\n\nBrowser " + \
      "performance: (revision %d)\n" % \
      browser_perf.revision_dict[create_graph.get_browsers()[0]][FROG]\
      [browser_perf.values_list[0]][1]
  for browser in create_graph.get_browsers():
    geo_mean_list = browser_perf.values_dict[browser][FROG][FROG_MEAN]
    # TODO(efortuna): deal with the fact that the latest of all browsers may not
    # be available.
    output += "  %s%s\n" % ((browser + ':').ljust(25),
        str(geo_mean_list[1] - geo_mean_list[0]).rjust(10))

  output += "\nCompile Size and Time: (revision %d)\n" % \
      time_size.revision_dict[COMMAND_LINE][FROG][time_size.values_list[0]][1]
  for metric in time_size.values_list:
    metric_list = time_size.values_dict[COMMAND_LINE][FROG][metric]
    output += "  %s%s\n" % ((metric + ':').ljust(25),
        str(metric_list[1] - metric_list[0]).rjust(10))

  output += "\nPercentage of language tests passing: (revision %d)\n" % \
      correctness.revision_dict['chrome'][FROG][correctness.values_list[0]][1]
  for browser in create_graph.get_browsers():
    num_correct = correctness.values_dict[browser][FROG][CORRECTNESS]
    output += "  %s%s%% change in passing\n" % ((browser + ':').ljust(25),
        str(num_correct[1] - num_correct[0]).rjust(10))

  output += "\nCommandline performance: (revision %d)\n" % \
      cl.revision_dict[COMMAND_LINE][FROG][cl.values_list[0]][1]
  for benchmark in cl.values_list:
    bench_list = cl.values_dict[COMMAND_LINE][FROG][benchmark]
    output += "  %s%s\n" % ((benchmark + ':').ljust(25),
        str(bench_list[1] - bench_list[0]).rjust(10))
  return output

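# The summary above renders as plain text with one aligned "label: delta" line
# per browser, metric, or benchmark, e.g. (values are illustrative
# placeholders, not real measurements):
#
#   Browser performance: (revision 1234)
#     chrome:                       -12
#     ...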

def specific_stats(browser_perf, time_size, cl):
  """Return a string with the gory details: raw benchmark numbers and the
  individual change in each benchmark.

  Args:
    browser_perf: BrowserPerformanceTestRunner object. Holds browser perf stats.
    time_size: CompileTimeSizeTestRunner object.
    cl: CommandLinePerformanceTestRunner object.
  """
  output = "\n\n---------------------------------------------\nThe latest " + \
      "raw numbers (and changes) for those interested:\n" + \
      "Browser performance:\n"
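  # For each configuration in V8_AND_FROG we list, per browser and benchmark,
  # the latest raw value followed by its day-over-day change in parentheses.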
  for v8_or_frog in V8_AND_FROG:
    for browser in create_graph.get_browsers():
      output += "  %s %s:\n" % (browser, v8_or_frog)
      for benchmark in create_graph.get_benchmarks():
        bench_list = browser_perf.values_dict[browser][v8_or_frog][benchmark]
        output += "    %s %s%s\n" % ((benchmark + ':').ljust(25),
            str(bench_list[1]).rjust(10), get_amount_changed(bench_list))

  output += "\nCompile Size and Time for frog:\n"
  for metric in time_size.values_list:
    metric_list = time_size.values_dict[COMMAND_LINE][FROG][metric]
    output += "    %s %s%s\n" % ((metric + ':').ljust(25),
        str(metric_list[1]).rjust(10), get_amount_changed(metric_list))

  output += "\nCommandline performance:\n"
  for v8_or_frog in V8_AND_FROG:
    output += '  %s:\n' % v8_or_frog
    for benchmark in cl.values_list:
      bench_list = cl.values_dict[COMMAND_LINE][v8_or_frog][benchmark]
      output += "    %s %s%s\n" % ((benchmark + ':').ljust(25),
          str(bench_list[1]).rjust(10), get_amount_changed(bench_list))

  return output

def get_amount_changed(values_tuple):
  """Return a formatted string indicating the amount of change (positive or
  negative) in the benchmark since the last run.

  Args:
    values_tuple: a pair of (previous, current) values whose difference is
    reported."""
  difference = values_tuple[1] - values_tuple[0]
  # str() already includes the '-' sign for negative numbers, so only prepend
  # an explicit '+' when the difference is non-negative.
  prefix = ''
  if difference >= 0:
    prefix = '+'
  return ("(%s%s)" % (prefix, str(difference))).rjust(10)
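# Illustrative examples (hypothetical values, two-element sequences assumed):
#   get_amount_changed((100, 117))  ->  '     (+17)'
#   get_amount_changed((117, 100))  ->  '     (-17)'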


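# main() below only prints the report, while the module docstring also mentions
# mailing it out. The helper below is a minimal illustrative sketch of one way
# to do that; it is not wired into main(), and the SMTP host, sender, and
# recipient addresses are placeholder assumptions.
def mail_stats(stats, sender='perf-bot@example.com',
               recipient='perf-results@example.com'):
  """Send the stats report as a plain-text email (illustrative sketch)."""
  import smtplib
  from email.mime.text import MIMEText
  msg = MIMEText(stats)
  msg['Subject'] = 'Daily performance summary'
  msg['From'] = sender
  msg['To'] = recipient
  # Assumes an SMTP relay is listening on localhost.
  server = smtplib.SMTP('localhost')
  server.sendmail(sender, [recipient], msg.as_string())
  server.quit()
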

def main():
  stats = calculate_stats()
  print(stats)

if __name__ == '__main__':
  main()