Chromium Code Reviews

Unified Diff: mojo/devtools/common/devtoolslib/benchmark.py

Issue 1433693004: mojo_benchmark: aggregate results over multiple runs. (Closed)
Base URL: git@github.com:domokit/mojo.git@master
Patch Set: Address Ben's comments. Created 5 years, 1 month ago
 # Copyright 2015 The Chromium Authors. All rights reserved.
 # Use of this source code is governed by a BSD-style license that can be
 # found in the LICENSE file.

 """Logic that drives runs of the benchmarking mojo app and parses its output."""

 import os.path
 import re

 _BENCHMARK_APP = 'https://core.mojoapps.io/benchmark.mojo'
(...skipping 25 matching lines...)
     if match:
       measurement_spec = match.group(1)
       measurement_result = match.group(2)
       try:
         measurement_results[measurement_spec] = float(measurement_result)
       except ValueError:
         pass
   return measurement_results


-class Results(object):
+class Outcome(object):
   """Holds results of a benchmark run."""

   def __init__(self, succeeded, error_str, output):
     self.succeeded = succeeded
     self.error_str = error_str
     self.output = output
-    self.measurements = None
+    # Maps measurement specs to measurement results given as floats. Only
+    # measurements that succeeded (ie. we retrieved their results) are
+    # represented.
+    self.results = {}
+    self.some_measurements_failed = False

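By way of illustration, a minimal sketch of the new result object's shape as callers see it; the spec string and the values below are made up, not from this patch:

  outcome = Outcome(succeeded=True, error_str=None, output='raw shell output')
  outcome.results['time_between/a/b'] = 12.5  # Hypothetical spec -> float.
  assert not outcome.some_measurements_failed
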
 def run(shell, shell_args, app, duration_seconds, measurements, verbose,
         android, output_file):
   """Runs the given benchmark by running `benchmark.mojo` in mojo shell with
   appropriate arguments and returns the produced output.

   Returns:
-    A tuple of (succeeded, error_msg, output).
+    An instance of Outcome holding the results of the run.
   """
   timeout = duration_seconds + _EXTRA_TIMEOUT
   benchmark_args = []
   benchmark_args.append('--app=' + app)
   benchmark_args.append('--duration=' + str(duration_seconds))

   device_output_file = None
   if output_file:
     if android:
       device_output_file = os.path.join(shell.get_tmp_dir_path(), output_file)
       benchmark_args.append('--trace-output=' + device_output_file)
     else:
       benchmark_args.append('--trace-output=' + output_file)

   for measurement in measurements:
     benchmark_args.append(measurement['spec'])

   shell_args = list(shell_args)
   shell_args.append(_BENCHMARK_APP)
   shell_args.append('--force-offline-by-default')
   shell_args.append('--args-for=%s %s' % (_BENCHMARK_APP,
                                           ' '.join(benchmark_args)))

   if verbose:
     print 'shell arguments: ' + str(shell_args)
   return_code, output, did_time_out = shell.run_and_get_output(
       shell_args, timeout=timeout)

   if did_time_out:
-    return Results(False, 'timed out', output)
+    return Outcome(False, 'timed out', output)
   if return_code:
-    return Results(False, 'return code: ' + str(return_code), output)
+    return Outcome(False, 'return code: ' + str(return_code), output)

   # Pull the trace file even if some measurements are missing, as it can be
   # useful in debugging.
   if device_output_file:
     shell.pull_file(device_output_file, output_file, remove_original=True)

-  results = Results(True, None, output)
-  results.measurements = _parse_measurement_results(output)
-  return results
+  outcome = Outcome(True, None, output)
+  parsed_results = _parse_measurement_results(output)
+  for measurement in measurements:
+    spec = measurement['spec']
+    if spec in parsed_results:
+      outcome.results[spec] = parsed_results[spec]
+    else:
+      outcome.some_measurements_failed = True
+  return outcome
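
Given the issue title (aggregating results over multiple runs), here is a hedged sketch of how a caller could aggregate Outcome.results across repeated runs; aggregate_runs and its averaging policy are illustrative assumptions, not code from this patch:

  def aggregate_runs(shell, shell_args, app, duration_seconds, measurements,
                     num_runs):
    # Collect each measurement spec's float results across several runs.
    collected = {}  # Maps spec -> list of floats from successful runs.
    for _ in range(num_runs):
      outcome = run(shell, shell_args, app, duration_seconds, measurements,
                    verbose=False, android=False, output_file=None)
      if not outcome.succeeded:
        continue  # A real caller might surface outcome.error_str instead.
      for spec, value in outcome.results.items():
        collected.setdefault(spec, []).append(value)
    # Average each spec's results; specs with no successful runs are absent.
    return dict((spec, sum(values) / len(values))
                for spec, values in collected.items())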
