Index: scripts/slave/recipe_modules/perf_try_staging/api.py
diff --git a/scripts/slave/recipe_modules/perf_try_staging/api.py b/scripts/slave/recipe_modules/perf_try_staging/api.py
index dbad182e353b643938b5da6485d674e0d1554cc1..10b468dc46f9b43ae78df82da1b4f8c6a723d95a 100644
--- a/scripts/slave/recipe_modules/perf_try_staging/api.py
+++ b/scripts/slave/recipe_modules/perf_try_staging/api.py
@@ -176,17 +176,16 @@ class PerfTryJobApi(recipe_api.RecipeApi):
   def _run_test(self, cfg, **kwargs):
     """Runs test from config and returns results."""
-    values, overall_output, retcodes = self.m.bisect_tester_staging.run_test(
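+    # run_test() now returns a single aggregated mapping of per-run data
+    # instead of the (values, output, retcodes) tuple it returned before.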
+    all_values = self.m.bisect_tester_staging.run_test(
         cfg, **kwargs)
-    all_values = self.m.bisect_tester_staging.digest_run_results(values, retcodes, cfg)
     overall_success = True
     if (not kwargs.get('allow_flakes', True) and
         cfg.get('test_type', 'perf') != 'return_code'):
-      overall_success = all(v == 0 for v in retcodes)
+      overall_success = all(v == 0 for v in all_values['retcodes'])
     return {
         'results': all_values,
         'ret_code': overall_success,
-        'output': ''.join(overall_output)
+        'output': ''.join(all_values['output'])
     }
 
   def _build_and_run_tests(self, cfg, update_step, bot_db, revision_hash,
@@ -282,8 +281,16 @@ class PerfTryJobApi(recipe_api.RecipeApi):
"""Parses results and creates Results step.""" |
output_with_patch = results_with_patch.get('output') |
output_without_patch = results_without_patch.get('output') |
- values_with_patch = results_with_patch.get('results').get('values') |
- values_without_patch = results_without_patch.get('results').get('values') |
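+    # Metric values are no longer precomputed by the test step; parse them
+    # on demand from the per-run output files. step_test_data supplies
+    # canned values when the recipe runs in simulation.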
+    values_with_patch = self.parse_values_only(
+        results_with_patch.get('results'),
+        cfg.get('metric'),
+        _output_format(cfg.get('command')),
+        step_test_data=lambda: self.m.json.test_api.output_stream([1, 1, 1]))
+    values_without_patch = self.parse_values_only(
+        results_without_patch.get('results'),
+        cfg.get('metric'),
+        _output_format(cfg.get('command')),
+        step_test_data=lambda: self.m.json.test_api.output_stream([9, 9, 9]))
 
     cloud_links_without_patch = self.parse_cloud_links(output_without_patch)
     cloud_links_with_patch = self.parse_cloud_links(output_with_patch)
@@ -371,8 +378,16 @@ class PerfTryJobApi(recipe_api.RecipeApi):
"""Returns the results as a dict.""" |
output_with_patch = results_with_patch.get('output') |
output_without_patch = results_without_patch.get('output') |
- values_with_patch = results_with_patch.get('results').get('values') |
- values_without_patch = results_without_patch.get('results').get('values') |
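+    # Same on-demand parsing as in the results-display path above.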
+    values_with_patch = self.parse_values_only(
+        results_with_patch.get('results'),
+        config.get('metric'),
+        _output_format(config.get('command')),
+        step_test_data=lambda: self.m.json.test_api.output_stream([1, 1, 1]))
+    values_without_patch = self.parse_values_only(
+        results_without_patch.get('results'),
+        config.get('metric'),
+        _output_format(config.get('command')),
+        step_test_data=lambda: self.m.json.test_api.output_stream([9, 9, 9]))
 
     cloud_links_without_patch = self.parse_cloud_links(output_without_patch)
     cloud_links_with_patch = self.parse_cloud_links(output_with_patch)
@@ -450,6 +465,35 @@ class PerfTryJobApi(recipe_api.RecipeApi):
     return '%sbuilders/%s/builds/%s' % (bot_url, builder_name, builder_number)
 
+  def parse_values_only(self, results, metric, output_format, **kwargs):
+    """Parses the values for the given metric from the given results.
+
+    This is meant to be used by tryjobs that specify a metric."""
+    if not metric:
+      return None
+
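+    # Each supported output format records its per-run result files under
+    # a different key in |results|.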
+    if output_format == 'buildbot':
+      files = results['stdout_paths']
+    elif output_format == 'chartjson':
+      files = results['chartjson_paths']
+    elif output_format == 'valueset':
+      files = results['valueset_paths']
+    else:  # pragma: no cover
+      raise self.m.step.StepFailure('Unsupported format: ' + output_format)
+
+    # Apply str() to the paths and metric so the command-line arguments
+    # stay ASCII; passing unicode values here has broken invocations in
+    # the past.
+    args = [','.join(map(str, files)), str(metric), '--' + output_format]
+    script = self.m.path['catapult'].join(
+        'tracing', 'bin', 'parse_metric_cmdline')
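+    # The script prints the parsed metric values as JSON on stdout;
+    # self.m.json.output() captures and decodes them for the caller.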
+    return self.m.python(
+        'Parse metric values',
+        script=script,
+        args=args,
+        stdout=self.m.json.output(),
+        **kwargs).stdout
+
+
 def _validate_perf_config(config_contents, required_parameters):
   """Validates the perf config file contents.
@@ -512,3 +556,11 @@ def _prepend_src_to_path_in_command(test_cfg):
       v = 'src/tools/perf/run_benchmark'
     command_to_run.append(v)
   test_cfg.update({'command': ' '.join(command_to_run)})
+
+def _output_format(command):
+  """Determines the output format for the given command."""
+  if 'chartjson' in command:
+    return 'chartjson'
+  elif 'valueset' in command:
+    return 'valueset'
+  return 'buildbot'
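
How the new pieces fit together, as a rough sketch written as if inside
PerfTryJobApi; the config below is made up for illustration, real configs
come from the try job's perf config file:

    # Illustrative only: a config whose command selects chartjson output.
    cfg = {
        'command': 'src/tools/perf/run_benchmark '
                   '--output-format=chartjson some_benchmark',
        'metric': 'some_chart/some_trace',  # made-up metric name
    }
    results = self._run_test(cfg, allow_flakes=False)
    # _output_format() returns 'chartjson' because the command mentions it,
    # so parse_values_only() reads the files listed under
    # results['results']['chartjson_paths'].
    values = self.parse_values_only(
        results.get('results'), cfg.get('metric'),
        _output_format(cfg.get('command')))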