OLD | NEW |
1 # Copyright 2015 The Chromium Authors. All rights reserved. | 1 # Copyright 2015 The Chromium Authors. All rights reserved. |
2 # Use of this source code is governed by a BSD-style license that can be | 2 # Use of this source code is governed by a BSD-style license that can be |
3 # found in the LICENSE file. | 3 # found in the LICENSE file. |
4 | 4 |
5 """API for the perf try job recipe module. | 5 """API for the perf try job recipe module. |
6 | 6 |
7 This API is meant to enable the perf try job recipe on any chromium-supported | 7 This API is meant to enable the perf try job recipe on any chromium-supported |
8 platform for any test that can be run via buildbot, perf or otherwise. | 8 platform for any test that can be run via buildbot, perf or otherwise. |
9 """ | 9 """ |
10 | 10 |
(...skipping 158 matching lines...)
169 self.m.gclient.c.revisions['src'] = str(revision) | 169 self.m.gclient.c.revisions['src'] = str(revision) |
170 update_step = self.m.bot_update.ensure_checkout( | 170 update_step = self.m.bot_update.ensure_checkout( |
171 suffix=str(revision), patch=False, update_presentation=False) | 171 suffix=str(revision), patch=False, update_presentation=False) |
172 assert update_step.json.output['did_run'] | 172 assert update_step.json.output['did_run'] |
173 self.m.chromium.runhooks(name='runhooks on %s' % str(revision)) | 173 self.m.chromium.runhooks(name='runhooks on %s' % str(revision)) |
174 | 174 |
175 return update_step | 175 return update_step |
176 | 176 |
177 def _run_test(self, cfg, **kwargs): | 177 def _run_test(self, cfg, **kwargs): |
178 """Runs test from config and return results.""" | 178 """Runs test from config and return results.""" |
179 values, overall_output, retcodes = self.m.bisect_tester.run_test( | 179 all_values = self.m.bisect_tester.run_test( |
180 cfg, **kwargs) | 180 cfg, **kwargs) |
181 all_values = self.m.bisect_tester.digest_run_results(values, retcodes, cfg) | |
182 overall_success = True | 181 overall_success = True |
183 if (not kwargs.get('allow_flakes', True) and | 182 if (not kwargs.get('allow_flakes', True) and |
184 cfg.get('test_type', 'perf') != 'return_code'): | 183 cfg.get('test_type', 'perf') != 'return_code'): |
185 overall_success = all(v == 0 for v in retcodes) | 184 overall_success = all(v == 0 for v in all_values['retcodes']) |
186 return { | 185 return { |
187 'results': all_values, | 186 'results': all_values, |
188 'ret_code': overall_success, | 187 'ret_code': overall_success, |
189 'output': ''.join(overall_output) | 188 'output': ''.join(all_values['output']) |
190 } | 189 } |
191 | 190 |
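Note: with this CL, bisect_tester.run_test returns a single dict rather than the old (values, output, retcodes) tuple, and the digest_run_results call goes away. A minimal sketch of the shape _run_test now assumes, inferred from the usages in this diff (values are illustrative; any keys beyond these are not visible here):

    # 'retcodes' and 'output' are read directly in _run_test above; the
    # *_paths keys are consumed later by parse_values_only.
    all_values = {
        'retcodes': [0, 0, 0],                      # one exit code per repeat
        'output': ['run 1 stdout...', '...'],       # joined with ''.join()
        'stdout_paths': ['/tmp/run_1.txt'],         # for --buildbot output
        'chartjson_paths': ['/tmp/chart_1.json'],   # for --chartjson output
        'valueset_paths': ['/tmp/values_1.json'],   # for --valueset output
    }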
192 def _build_and_run_tests(self, cfg, update_step, bot_db, revision_hash, | 191 def _build_and_run_tests(self, cfg, update_step, bot_db, revision_hash, |
193 **kwargs): | 192 **kwargs): |
194 """Compiles binaries and runs tests for a given a revision.""" | 193 """Compiles binaries and runs tests for a given a revision.""" |
195 with_patch = kwargs.get('name') == 'With Patch' | 194 with_patch = kwargs.get('name') == 'With Patch' |
196 update_step = self._checkout_revision(update_step, bot_db, revision_hash) | 195 update_step = self._checkout_revision(update_step, bot_db, revision_hash) |
197 if update_step.presentation.properties: | 196 if update_step.presentation.properties: |
198 revision_hash = update_step.presentation.properties['got_revision'] | 197 revision_hash = update_step.presentation.properties['got_revision'] |
199 revision = build_state.BuildState(self, revision_hash, with_patch) | 198 revision = build_state.BuildState(self, revision_hash, with_patch) |
(...skipping 75 matching lines...)
275 if 'good_revision' not in config and 'bad_revision' not in config: | 274 if 'good_revision' not in config and 'bad_revision' not in config: |
276 return (None, None) | 275 return (None, None) |
277 return (self._get_hash(config.get('bad_revision')), | 276 return (self._get_hash(config.get('bad_revision')), |
278 self._get_hash(config.get('good_revision'))) | 277 self._get_hash(config.get('good_revision'))) |
279 | 278 |
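Note: the helper in the hunk above hands back the hashes in (bad, good) order, or (None, None) when neither bound is configured. A hedged example, assuming _get_hash (not shown in this diff) resolves a commit position or ref to a git hash:

    # Hypothetical config values.
    # {'bad_revision': '306478', 'good_revision': '306475'}
    #   -> ('<hash of 306478>', '<hash of 306475>')   # bad first, then good
    # {}  -> (None, None)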
280 def _compare_and_present_results( | 279 def _compare_and_present_results( |
281 self, cfg, results_without_patch, results_with_patch, labels): | 280 self, cfg, results_without_patch, results_with_patch, labels): |
282 """Parses results and creates Results step.""" | 281 """Parses results and creates Results step.""" |
283 output_with_patch = results_with_patch.get('output') | 282 output_with_patch = results_with_patch.get('output') |
284 output_without_patch = results_without_patch.get('output') | 283 output_without_patch = results_without_patch.get('output') |
285 values_with_patch = results_with_patch.get('results').get('values') | 284 values_with_patch = self.parse_values_only( |
286 values_without_patch = results_without_patch.get('results').get('values') | 285 results_with_patch.get('results'), |
| 286 cfg.get('metric'), |
| 287 _output_format(cfg.get('command')), |
| 288 step_test_data=lambda: self.m.json.test_api.output_stream([1, 1, 1])) |
| 289 values_without_patch = self.parse_values_only( |
| 290 results_without_patch.get('results'), |
| 291 cfg.get('metric'), |
| 292 _output_format(cfg.get('command')), |
| 293 step_test_data=lambda: self.m.json.test_api.output_stream([9, 9, 9])) |
287 | 294 |
288 cloud_links_without_patch = self.parse_cloud_links(output_without_patch) | 295 cloud_links_without_patch = self.parse_cloud_links(output_without_patch) |
289 cloud_links_with_patch = self.parse_cloud_links(output_with_patch) | 296 cloud_links_with_patch = self.parse_cloud_links(output_with_patch) |
290 | 297 |
291 results_link = (cloud_links_without_patch['html'][0] | 298 results_link = (cloud_links_without_patch['html'][0] |
292 if cloud_links_without_patch['html'] else '') | 299 if cloud_links_without_patch['html'] else '') |
293 | 300 |
294 if not values_with_patch or not values_without_patch: | 301 if not values_with_patch or not values_without_patch: |
295 step_result = self.m.step('Results', []) | 302 step_result = self.m.step('Results', []) |
296 step_result.presentation.step_text = ( | 303 step_result.presentation.step_text = ( |
(...skipping 67 matching lines...)
364 'profiler': profiler_pattern.findall(output), | 371 'profiler': profiler_pattern.findall(output), |
365 } | 372 } |
366 return results | 373 return results |
367 | 374 |
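Note: only the tail of parse_cloud_links is visible here, but the usages in this CL (results_link = cloud_links_without_patch['html'][0], plus the 'profiler' findall above) imply it returns a dict of URL lists scraped from test output. An illustrative shape, with a made-up URL:

    links = {
        'html': ['https://storage.cloud.google.com/example-results.html'],
        'profiler': [],   # no profiler links found in this run
    }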
368 | 375 |
369 def get_result(self, config, results_without_patch, results_with_patch, | 376 def get_result(self, config, results_without_patch, results_with_patch, |
370 labels): | 377 labels): |
371 """Returns the results as a dict.""" | 378 """Returns the results as a dict.""" |
372 output_with_patch = results_with_patch.get('output') | 379 output_with_patch = results_with_patch.get('output') |
373 output_without_patch = results_without_patch.get('output') | 380 output_without_patch = results_without_patch.get('output') |
374 values_with_patch = results_with_patch.get('results').get('values') | 381 values_with_patch = self.parse_values_only( |
375 values_without_patch = results_without_patch.get('results').get('values') | 382 results_with_patch.get('results'), |
| 383 config.get('metric'), |
| 384 _output_format(config.get('command')), |
| 385 step_test_data=lambda: self.m.json.test_api.output_stream([1, 1, 1])) |
| 386 values_without_patch = self.parse_values_only( |
| 387 results_without_patch.get('results'), |
| 388 config.get('metric'), |
| 389 _output_format(config.get('command')), |
| 390 step_test_data=lambda: self.m.json.test_api.output_stream([9, 9, 9])) |
376 | 391 |
377 cloud_links_without_patch = self.parse_cloud_links(output_without_patch) | 392 cloud_links_without_patch = self.parse_cloud_links(output_without_patch) |
378 cloud_links_with_patch = self.parse_cloud_links(output_with_patch) | 393 cloud_links_with_patch = self.parse_cloud_links(output_with_patch) |
379 | 394 |
380 cloud_link = (cloud_links_without_patch['html'][0] | 395 cloud_link = (cloud_links_without_patch['html'][0] |
381 if cloud_links_without_patch['html'] else '') | 396 if cloud_links_without_patch['html'] else '') |
382 | 397 |
383 results = { | 398 results = { |
384 'try_job_id': config.get('try_job_id'), | 399 'try_job_id': config.get('try_job_id'), |
385 'status': 'completed', # TODO(chrisphan) Get partial results state. | 400 'status': 'completed', # TODO(chrisphan) Get partial results state. |
(...skipping 57 matching lines...)
443 | 458 |
444 def _get_build_url(self): | 459 def _get_build_url(self): |
445 properties = self.m.properties | 460 properties = self.m.properties |
446 bot_url = properties.get('buildbotURL', | 461 bot_url = properties.get('buildbotURL', |
447 'http://build.chromium.org/p/chromium/') | 462 'http://build.chromium.org/p/chromium/') |
448 builder_name = urllib.quote(properties.get('buildername', '')) | 463 builder_name = urllib.quote(properties.get('buildername', '')) |
449 builder_number = str(properties.get('buildnumber', '')) | 464 builder_number = str(properties.get('buildnumber', '')) |
450 return '%sbuilders/%s/builds/%s' % (bot_url, builder_name, builder_number) | 465 return '%sbuilders/%s/builds/%s' % (bot_url, builder_name, builder_number) |
451 | 466 |
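Note: _get_build_url is plain string concatenation, so a build ends up at <buildbotURL>builders/<name>/builds/<number>. A quick worked example with a made-up builder and build number (Python 2, matching the urllib.quote call above):

    import urllib
    bot_url = 'http://build.chromium.org/p/chromium/'       # the default
    builder = urllib.quote('linux_perf_bisect')             # hypothetical
    url = '%sbuilders/%s/builds/%s' % (bot_url, builder, '522')
    # -> http://build.chromium.org/p/chromium/builders/linux_perf_bisect/builds/522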
452 | 467 |
| 468 def parse_values_only(self, results, metric, output_format, **kwargs): |
| 469 """Parses the values for a given metric from the given results. |
| 470 |
| 471 This is meant to be used by try jobs with a metric.""" |
| 472 if not metric: |
| 473 return None |
| 474 |
| 475 if output_format == 'buildbot': |
| 476 files = results['stdout_paths'] |
| 477 elif output_format == 'chartjson': |
| 478 files = results['chartjson_paths'] |
| 479 elif output_format == 'valueset': |
| 480 files = results['valueset_paths'] |
| 481 else: # pragma: no cover |
| 482 raise self.m.step.StepFailure('Unsupported format: ' + output_format) |
| 483 |
| 484 # Apply str to files to constrain cmdline args to ASCII; this used to |
| 485 # break when unicode strings were passed instead. |
| 486 args = [','.join(map(str, files)), str(metric), '--' + output_format] |
| 487 script = self.m.path['catapult'].join( |
| 488 'tracing', 'bin', 'parse_metric_cmdline') |
| 489 return self.m.python( |
| 490 'Parse metric values', |
| 491 script=script, |
| 492 args=args, |
| 493 stdout=self.m.json.output(), |
| 494 **kwargs).stdout |
| 495 |
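Note: a sketch of calling the new parse_values_only from a recipe, under the assumptions that 'results' is the dict returned by run_test and that the catapult parse_metric_cmdline script prints a JSON list of values to stdout (which is what stdout=self.m.json.output() implies). The module name, metric, and paths are all made up:

    values = api.perf_try.parse_values_only(        # module name hypothetical
        results={'chartjson_paths': ['/tmp/chart_1.json', '/tmp/chart_2.json']},
        metric='Total/Total',                       # hypothetical metric
        output_format='chartjson')
    # values would be e.g. [12.3, 12.5], one entry per test run.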
| 496 |
453 def _validate_perf_config(config_contents, required_parameters): | 497 def _validate_perf_config(config_contents, required_parameters): |
454 """Validates the perf config file contents. | 498 """Validates the perf config file contents. |
455 | 499 |
456 This is used when we're doing a perf try job, the config file is called | 500 This is used when we're doing a perf try job, the config file is called |
457 run-perf-test.cfg by default. | 501 run-perf-test.cfg by default. |
458 | 502 |
459 The parameters checked are the required parameters; any additional optional | 503 The parameters checked are the required parameters; any additional optional |
460 parameters won't be checked and validation will still pass. | 504 parameters won't be checked and validation will still pass. |
461 | 505 |
462 Args: | 506 Args: |
(...skipping 42 matching lines...)
505 | 549 |
506 def _prepend_src_to_path_in_command(test_cfg): | 550 def _prepend_src_to_path_in_command(test_cfg): |
507 command_to_run = [] | 551 command_to_run = [] |
508 for v in test_cfg.get('command').split(): | 552 for v in test_cfg.get('command').split(): |
509 if v in ['./tools/perf/run_benchmark', | 553 if v in ['./tools/perf/run_benchmark', |
510 'tools/perf/run_benchmark', | 554 'tools/perf/run_benchmark', |
511 'tools\\perf\\run_benchmark']: | 555 'tools\\perf\\run_benchmark']: |
512 v = 'src/tools/perf/run_benchmark' | 556 v = 'src/tools/perf/run_benchmark' |
513 command_to_run.append(v) | 557 command_to_run.append(v) |
514 test_cfg.update({'command': ' '.join(command_to_run)}) | 558 test_cfg.update({'command': ' '.join(command_to_run)}) |
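Note: _prepend_src_to_path_in_command only rewrites the recognized run_benchmark tokens, leaving every other argument alone. For example (flags are illustrative):

    cfg = {'command': './tools/perf/run_benchmark -v --browser=release sunspider'}
    _prepend_src_to_path_in_command(cfg)
    # cfg['command'] == 'src/tools/perf/run_benchmark -v --browser=release sunspider'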
| 559 |
| 560 def _output_format(command): |
| 561 """Determine the output format for a given command.""" |
| 562 if 'chartjson' in command: |
| 563 return 'chartjson' |
| 564 elif 'valueset' in command: |
| 565 return 'valueset' |
| 566 return 'buildbot' |
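Note: _output_format is a substring test on the whole command, not a flag parser, so any command that merely mentions 'chartjson' or 'valueset' picks that format, and everything else falls back to 'buildbot':

    _output_format('run_benchmark --output-format=chartjson sunspider')  # 'chartjson'
    _output_format('run_benchmark --output-format=valueset sunspider')   # 'valueset'
    _output_format('out/Release/cc_perftests')                           # 'buildbot'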