Chromium Code Reviews

Unified Diff: scripts/slave/recipe_modules/bisect_tester/perf_test.py

Issue 2247373002: Refactor stages 1, 2 and test_api overhaul. (Closed) Base URL: https://chromium.googlesource.com/chromium/tools/build.git@master
Patch Set: Created 4 years, 4 months ago
Index: scripts/slave/recipe_modules/bisect_tester/perf_test.py
diff --git a/scripts/slave/recipe_modules/bisect_tester/perf_test.py b/scripts/slave/recipe_modules/bisect_tester/perf_test.py
index f07f33d61fe6bb6c33e6c9e384ff26672eb61c5e..f7de79b17a5fc7fe6c464e8f22bc794c22c8f6b1 100644
--- a/scripts/slave/recipe_modules/bisect_tester/perf_test.py
+++ b/scripts/slave/recipe_modules/bisect_tester/perf_test.py
@@ -79,16 +79,15 @@ def run_perf_test(api, test_config, **kwargs):
   # a helper method, or extract the metric-extraction to make this more
   # cleaner.
   limit = test_config['max_time_minutes'] * kwargs.get('time_multiplier', 1)
-  run_results = {'measured_values': [], 'errors': set()}
-  values = run_results['measured_values']
+  results = {'valueset_paths': [], 'chartjson_paths': [], 'errors': set(),
+             'retcodes': [], 'values': [], 'output': []}
   metric = test_config.get('metric')
-  retcodes = []
-  output_for_all_runs = []
   temp_dir = None
   repeat_cnt = test_config['repeat_count']
 
   command = test_config['command']
   use_chartjson = bool('chartjson' in command)
+  use_valueset = bool('valueset' in command)
   is_telemetry = _is_telemetry_command(command)
   start_time = time.time()
@@ -109,46 +108,59 @@
         command += ' --upload-results'
       if kwargs.get('results_label'):
         command += ' --results-label=%s' % kwargs.get('results_label')
-    if use_chartjson:  # pragma: no cover
+    if use_chartjson or use_valueset:  # pragma: no cover
       temp_dir = api.m.path.mkdtemp('perf-test-output')
       command = _set_output_dir(command, str(temp_dir))
-      results_path = temp_dir.join('results-chart.json')
+      chartjson_path = temp_dir.join('results-chart.json')
+      valueset_path = temp_dir.join('results-valueset.json')
 
     step_name = "Performance Test%s %d of %d" % (
         ' (%s)' % kwargs['name'] if 'name' in kwargs else '', i + 1, repeat_cnt)
     if api.m.platform.is_linux:
       os.environ['CHROME_DEVEL_SANDBOX'] = api.m.path.join(
           '/opt', 'chromium', 'chrome_sandbox')
-    out, err, retcode = _run_command(api, command, step_name)
+    out, err, retcode = _run_command(api, command, step_name, **kwargs)
+    results['output'].append(out or '')
 
     if out is None and err is None:
       # dummy value when running test TODO: replace with a mock
-      values.append(0)
+      results['values'].append(0)
     elif metric:  # pragma: no cover
       if use_chartjson:
-        step_result = api.m.json.read(
-            'Reading chartjson results', results_path)
-        has_valid_value, value = find_values(
-            step_result.json.output, Metric(metric))
-      else:
+        try:
+          step_result = api.m.json.read(
+              'Reading chartjson results', chartjson_path)
+        except api.m.step.StepFailure:
+          pass
+        else:
+          if step_result.json.output:
+            results['chartjson_paths'].append(chartjson_path)
+      if use_valueset:
+        try:
+          step_result = api.m.json.read(
+              'Reading valueset results', valueset_path,
+              step_test_data=lambda: api.m.json.test_api.output(
+                  {'dummy':'dict'}))
+        except api.m.step.StepFailure:
+          pass
+        else:
+          if step_result.json.output:
+            results['valueset_paths'].append(valueset_path)
+      if not use_valueset and not use_chartjson:
         has_valid_value, value = parse_metric.parse_metric(
             out, err, metric.split('/'))
-      output_for_all_runs.append(out)
-      if has_valid_value:
-        values.extend(value)
-      else:
-        # This means the metric was not found in the output.
-        if not retcode:
-          # If all tests passed, but the metric was not found, this means that
-          # something changed on the test, or the given metric name was
-          # incorrect, we need to surface this on the bisector.
-          run_results['errors'].add('MISSING_METRIC')
-    else:
-      output_for_all_runs.append(out)
-    retcodes.append(retcode)
-
-  return run_results, output_for_all_runs, retcodes
-
+        if has_valid_value:
+          results['values'].extend(value)
+        else:
+          # This means the metric was not found in the output.
+          if not retcode:
+            # If all tests passed, but the metric was not found, this means that
+            # something changed on the test, or the given metric name was
+            # incorrect, we need to surface this on the bisector.
+            results['errors'].add('MISSING_METRIC')
+    results['retcodes'].append(retcode)
+
+  return results
 
 def find_values(results, metric):  # pragma: no cover
   """Tries to extract the given metric from the given results.
@@ -202,14 +214,13 @@ def _rebase_path(api, file_path):
         *file_path.split('src', 1)[1].split('\\')[1:])
   return file_path
 
-def _run_command(api, command, step_name):
+def _run_command(api, command, step_name, **kwargs):
   command_parts = command.split()
   stdout = api.m.raw_io.output()
   stderr = api.m.raw_io.output()
   # TODO(prasadv): Remove this once bisect runs are no longer running
   # against revisions from February 2016 or earlier.
-  kwargs = {}
   if 'android-chrome' in command:  # pragma: no cover
     kwargs['env'] = {'CHROMIUM_OUTPUT_DIR': api.m.chromium.output_dir}
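
Note on the interface change: run_perf_test previously returned a (run_results, output_for_all_runs, retcodes) tuple and now returns a single results dict with the keys 'values', 'errors', 'retcodes', 'output', 'chartjson_paths' and 'valueset_paths'. The sketch below shows how a caller might consume that dict. It is illustrative only: the dict keys come from this patch, while the helper name summarize_run and its aggregation rules are assumptions, not part of the CL.

# Illustrative sketch (not part of the patch): consuming the results dict
# now returned by run_perf_test. Only the dict keys come from the CL;
# summarize_run and its aggregation rules are hypothetical.
def summarize_run(results):
  """Collapses a run_perf_test() result dict into a small summary."""
  values = results['values']
  return {
      # A run is treated as clean if no error tags were recorded and
      # every repeat exited with a zero return code.
      'ok': not results['errors'] and all(rc == 0 for rc in results['retcodes']),
      'sample_count': len(values),
      'mean_value': sum(values) / len(values) if values else None,
      'errors': sorted(results['errors']),
      # Paths to structured output files, when chartjson/valueset were used.
      'chartjson_paths': list(results['chartjson_paths']),
      'valueset_paths': list(results['valueset_paths']),
  }

if __name__ == '__main__':
  # Dummy data mirroring the dict initialized at the top of run_perf_test.
  fake = {'valueset_paths': [], 'chartjson_paths': [], 'errors': set(),
          'retcodes': [0, 0], 'values': [12.5, 13.1], 'output': ['', '']}
  print(summarize_run(fake))

The patch also wraps the chartjson/valueset reads in try/except api.m.step.StepFailure, so a missing or unreadable results file no longer fails the run; a path is recorded only when json.read actually returns output.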
