| OLD | NEW |
| 1 # Copyright 2015 The Chromium Authors. All rights reserved. | 1 # Copyright 2015 The Chromium Authors. All rights reserved. |
| 2 # Use of this source code is governed by a BSD-style license that can be | 2 # Use of this source code is governed by a BSD-style license that can be |
| 3 # found in the LICENSE file. | 3 # found in the LICENSE file. |
| 4 | 4 |
| 5 """API for the perf try job recipe module. | 5 """API for the perf try job recipe module. |
| 6 | 6 |
| 7 This API is meant to enable the perf try job recipe on any chromium-supported | 7 This API is meant to enable the perf try job recipe on any chromium-supported |
| 8 platform for any test that can be run via buildbot, perf or otherwise. | 8 platform for any test that can be run via buildbot, perf or otherwise. |
| 9 """ | 9 """ |
| 10 | 10 |
| (...skipping 159 matching lines...) | (...skipping 159 matching lines...) |
| 170 update_step = self.m.bot_update.ensure_checkout( | 170 update_step = self.m.bot_update.ensure_checkout( |
| 171 suffix=str(revision), force=True, patch=False, | 171 suffix=str(revision), force=True, patch=False, |
| 172 update_presentation=False) | 172 update_presentation=False) |
| 173 assert update_step.json.output['did_run'] | 173 assert update_step.json.output['did_run'] |
| 174 self.m.chromium.runhooks(name='runhooks on %s' % str(revision)) | 174 self.m.chromium.runhooks(name='runhooks on %s' % str(revision)) |
| 175 | 175 |
| 176 return update_step | 176 return update_step |
| 177 | 177 |
| 178 def _run_test(self, cfg, **kwargs): | 178 def _run_test(self, cfg, **kwargs): |
| 179 """Runs test from config and return results.""" | 179 """Runs test from config and return results.""" |
| 180 values, overall_output, retcodes = self.m.bisect_tester.run_test( | 180 run_results = self.m.bisect_tester.run_test( |
| 181 cfg, **kwargs) | 181 cfg, **kwargs) |
| 182 all_values = self.m.bisect_tester.digest_run_results(values, retcodes, cfg) | 182 all_values = self.m.bisect_tester.digest_run_results(run_results, cfg) |
| 183 overall_success = True | 183 overall_success = True |
| 184 if (not kwargs.get('allow_flakes', True) and | 184 if (not kwargs.get('allow_flakes', True) and |
| 185 cfg.get('test_type', 'perf') != 'return_code'): | 185 cfg.get('test_type', 'perf') != 'return_code'): |
| 186 overall_success = all(v == 0 for v in retcodes) | 186 overall_success = all(v == 0 for v in run_results['retcodes']) |
| 187 return { | 187 return { |
| 188 'results': all_values, | 188 'results': all_values, |
| 189 'ret_code': overall_success, | 189 'ret_code': overall_success, |
| 190 'output': ''.join(overall_output) | 190 'output': ''.join(run_results['output']) |
| 191 } | 191 } |
| 192 | 192 |
| 193 def _build_and_run_tests(self, cfg, update_step, bot_db, revision_hash, | 193 def _build_and_run_tests(self, cfg, update_step, bot_db, revision_hash, |
| 194 **kwargs): | 194 **kwargs): |
| 195 """Compiles binaries and runs tests for a given a revision.""" | 195 """Compiles binaries and runs tests for a given a revision.""" |
| 196 with_patch = kwargs.get('name') == 'With Patch' | 196 with_patch = kwargs.get('name') == 'With Patch' |
| 197 update_step = self._checkout_revision(update_step, bot_db, revision_hash) | 197 update_step = self._checkout_revision(update_step, bot_db, revision_hash) |
| 198 if update_step.presentation.properties: | 198 if update_step.presentation.properties: |
| 199 revision_hash = update_step.presentation.properties['got_revision'] | 199 revision_hash = update_step.presentation.properties['got_revision'] |
| 200 revision = build_state.BuildState(self, revision_hash, with_patch) | 200 revision = build_state.BuildState(self, revision_hash, with_patch) |
| (...skipping 301 matching lines...) | (...skipping 301 matching lines...) |
| 502 | 502 |
| 503 def _prepend_src_to_path_in_command(test_cfg): | 503 def _prepend_src_to_path_in_command(test_cfg): |
| 504 command_to_run = [] | 504 command_to_run = [] |
| 505 for v in test_cfg.get('command').split(): | 505 for v in test_cfg.get('command').split(): |
| 506 if v in ['./tools/perf/run_benchmark', | 506 if v in ['./tools/perf/run_benchmark', |
| 507 'tools/perf/run_benchmark', | 507 'tools/perf/run_benchmark', |
| 508 'tools\\perf\\run_benchmark']: | 508 'tools\\perf\\run_benchmark']: |
| 509 v = 'src/tools/perf/run_benchmark' | 509 v = 'src/tools/perf/run_benchmark' |
| 510 command_to_run.append(v) | 510 command_to_run.append(v) |
| 511 test_cfg.update({'command': ' '.join(command_to_run)}) | 511 test_cfg.update({'command': ' '.join(command_to_run)}) |
| OLD | NEW |