| OLD | NEW | 
|    1 # Copyright 2013 The Chromium Authors. All rights reserved. |    1 # Copyright 2013 The Chromium Authors. All rights reserved. | 
|    2 # Use of this source code is governed by a BSD-style license that can be |    2 # Use of this source code is governed by a BSD-style license that can be | 
|    3 # found in the LICENSE file. |    3 # found in the LICENSE file. | 
|    4  |    4  | 
|    5 """Runs a perf test on a single device. |    5 """Runs perf tests. | 
|    6  |    6  | 
|    7 Our buildbot infrastructure requires each slave to run steps serially. |    7 Our buildbot infrastructure requires each slave to run steps serially. | 
|    8 This is sub-optimal for android, where these steps can run independently on |    8 This is sub-optimal for android, where these steps can run independently on | 
|    9 multiple connected devices. |    9 multiple connected devices. | 
|   10  |   10  | 
|   11 The buildbots will run this script multiple times per cycle: |   11 The buildbots will run this script multiple times per cycle: | 
|   12 - First: all steps listed in --steps will be executed in parallel using all |   12 - First: all steps listed in --steps will be executed in parallel using all | 
|   13 connected devices. Step results will be pickled to disk. Each step has a unique |   13 connected devices. Step results will be pickled to disk. Each step has a unique | 
|   14 name. The result code will be ignored if the step name is listed in |   14 name. The result code will be ignored if the step name is listed in | 
|   15 --flaky-steps. |   15 --flaky-steps. | 
| (...skipping 73 matching lines...) | 
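The docstring above describes the first stage pickling each step's result to disk so a later print stage can recover it. A minimal sketch of that read-back, in the same Python 2 style as this file (the helper name and the `from pylib import constants` import are assumptions; the real print-step code is not part of this CL):

    import os
    import pickle

    from pylib import constants

    def _LoadStepResult(step_name):
      # Reads back a result pickled by TestRunner._SaveResult, or returns
      # None if the step never produced one.
      path = os.path.join(constants.PERF_OUTPUT_DIR, step_name)
      if not os.path.exists(path):
        return None
      with file(path, 'r') as f:
        return pickle.loads(f.read())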
|   89       device: Device to run the tests. |   89       device: Device to run the tests. | 
|   90       tests: a dict mapping test_name to command. |   90       tests: a dict mapping test_name to command. | 
|   91       flaky_tests: a list of flaky test_name. |   91       flaky_tests: a list of flaky test_name. | 
|   92     """ |   92     """ | 
|   93     super(TestRunner, self).__init__(device, None, 'Release') |   93     super(TestRunner, self).__init__(device, None, 'Release') | 
|   94     self._options = test_options |   94     self._options = test_options | 
|   95     self._tests = tests |   95     self._tests = tests | 
|   96     self._flaky_tests = flaky_tests |   96     self._flaky_tests = flaky_tests | 
|   97  |   97  | 
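For reference, a hypothetical construction of this runner; the positional order of test_options and device is inferred from the constructor body above, test_options is an options object from the runner's argument parsing (not shown), and the device serial and command are illustrative only:

    tests = {'sunspider': 'tools/perf/run_benchmark sunspider'}
    runner = TestRunner(test_options, '0123456789abcdef', tests,
                        flaky_tests=['sunspider'])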
|   98   @staticmethod |   98   @staticmethod | 
 |   99   def _IsBetter(result): | 
 |  100     if result['actual_exit_code'] == 0: | 
 |  101       return True | 
 |  102     pickled = os.path.join(constants.PERF_OUTPUT_DIR, | 
 |  103                            result['name']) | 
 |  104     if not os.path.exists(pickled): | 
 |  105       return True | 
 |  106     with file(pickled, 'r') as f: | 
 |  107       previous = pickle.loads(f.read()) | 
 |  108     return result['actual_exit_code'] < previous['actual_exit_code'] | 
 |  109  | 
 |  110   @staticmethod | 
|   99   def _SaveResult(result): |  111   def _SaveResult(result): | 
|  100     with file(os.path.join(constants.PERF_OUTPUT_DIR, |  112     if TestRunner._IsBetter(result): | 
|  101                            result['name']), 'w') as f: |  113       with file(os.path.join(constants.PERF_OUTPUT_DIR, | 
|  102       f.write(pickle.dumps(result)) |  114                              result['name']), 'w') as f: | 
 |  115         f.write(pickle.dumps(result)) | 
|  103  |  116  | 
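The new _IsBetter/_SaveResult pair keeps the best run of each step on disk: a success always overwrites, and a failure only overwrites a strictly worse failure. An illustration, assuming constants.PERF_OUTPUT_DIR exists and using only the keys _IsBetter reads:

    first = {'name': 'sunspider', 'actual_exit_code': 1}
    TestRunner._SaveResult(first)    # no pickle on disk yet -> saved
    second = {'name': 'sunspider', 'actual_exit_code': 0}
    TestRunner._SaveResult(second)   # exit code 0 always wins -> saved
    third = {'name': 'sunspider', 'actual_exit_code': 2}
    TestRunner._SaveResult(third)    # 2 is not better than 0 -> dropped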
|  104   def _LaunchPerfTest(self, test_name): |  117   def _LaunchPerfTest(self, test_name): | 
|  105     """Runs a perf test. |  118     """Runs a perf test. | 
|  106  |  119  | 
|  107     Args: |  120     Args: | 
|  108       test_name: the name of the test to be executed. |  121       test_name: the name of the test to be executed. | 
|  109  |  122  | 
|  110     Returns: |  123     Returns: | 
|  111       A tuple containing (Output, base_test_result.ResultType) |  124       A tuple containing (Output, base_test_result.ResultType) | 
|  112     """ |  125     """ | 
| (...skipping 15 matching lines...) | 
|  128         env=os.environ) |  141         env=os.environ) | 
|  129     end_time = datetime.datetime.now() |  142     end_time = datetime.datetime.now() | 
|  130     if exit_code is None: |  143     if exit_code is None: | 
|  131       exit_code = -1 |  144       exit_code = -1 | 
|  132     logging.info('%s : exit_code=%d in %d secs at %s', |  145     logging.info('%s : exit_code=%d in %d secs at %s', | 
|  133                  test_name, exit_code, (end_time - start_time).seconds, |  146                  test_name, exit_code, (end_time - start_time).seconds, | 
|  134                  self.device) |  147                  self.device) | 
|  135     result_type = base_test_result.ResultType.FAIL |  148     result_type = base_test_result.ResultType.FAIL | 
|  136     if exit_code == 0: |  149     if exit_code == 0: | 
|  137       result_type = base_test_result.ResultType.PASS |  150       result_type = base_test_result.ResultType.PASS | 
 |  151     actual_exit_code = exit_code | 
|  138     if test_name in self._flaky_tests: |  152     if test_name in self._flaky_tests: | 
|  139       # The exit_code is used at the second stage when printing the |  153       # The exit_code is used at the second stage when printing the | 
|  140       # test output. If the test is flaky, force it to "0" to get that |  154       # test output. If the test is flaky, force it to "0" to get that | 
|  141       # step green while still gathering data for the perf dashboards. |  155       # step green while still gathering data for the perf dashboards. | 
|  142       # The result_type is used by the test_dispatcher to retry the test. |  156       # The result_type is used by the test_dispatcher to retry the test. | 
|  143       exit_code = 0 |  157       exit_code = 0 | 
|  144  |  158  | 
|  145     persisted_result = { |  159     persisted_result = { | 
|  146         'name': test_name, |  160         'name': test_name, | 
|  147         'output': output, |  161         'output': output, | 
|  148         'exit_code': exit_code, |  162         'exit_code': exit_code, | 
 |  163         'actual_exit_code': actual_exit_code, | 
|  149         'result_type': result_type, |  164         'result_type': result_type, | 
|  150         'total_time': (end_time - start_time).seconds, |  165         'total_time': (end_time - start_time).seconds, | 
|  151         'device': self.device, |  166         'device': self.device, | 
|  152         'cmd': cmd, |  167         'cmd': cmd, | 
|  153     } |  168     } | 
|  154     self._SaveResult(persisted_result) |  169     self._SaveResult(persisted_result) | 
|  155  |  170  | 
|  156     return (output, result_type) |  171     return (output, result_type) | 
|  157  |  172  | 
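_LaunchPerfTest persists both codes so the masking for flaky steps stays visible: 'exit_code' is what the printing stage reports, while 'actual_exit_code' is what _IsBetter compares. A hypothetical record for a flaky step that actually failed (the step name and the base_test_result import path are assumptions):

    from pylib.base import base_test_result

    flaky_record = {
        'name': 'dromaeo',
        'exit_code': 0,          # masked so the buildbot step stays green
        'actual_exit_code': 1,   # true code, used by _IsBetter
        'result_type': base_test_result.ResultType.FAIL,  # still retried
    }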
|  158   def RunTest(self, test_name): |  173   def RunTest(self, test_name): | 
|  159     """Run a perf test on the device. |  174     """Run a perf test on the device. | 
|  160  |  175  | 
|  161     Args: |  176     Args: | 
|  162       test_name: String to use for logging the test result. |  177       test_name: String to use for logging the test result. | 
|  163  |  178  | 
|  164     Returns: |  179     Returns: | 
|  165       A tuple of (TestRunResults, retry). |  180       A tuple of (TestRunResults, retry). | 
|  166     """ |  181     """ | 
|  167     output, result_type = self._LaunchPerfTest(test_name) |  182     output, result_type = self._LaunchPerfTest(test_name) | 
|  168     results = base_test_result.TestRunResults() |  183     results = base_test_result.TestRunResults() | 
|  169     results.AddResult(base_test_result.BaseTestResult(test_name, result_type)) |  184     results.AddResult(base_test_result.BaseTestResult(test_name, result_type)) | 
|  170     retry = None |  185     retry = None | 
|  171     if not results.DidRunPass(): |  186     if not results.DidRunPass(): | 
|  172       retry = test_name |  187       retry = test_name | 
|  173     return results, retry |  188     return results, retry | 
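RunTest hands the failing step's name back as the retry token; the actual retry loop lives in test_dispatcher, outside this CL. A minimal driver sketch under that assumption:

    results, retry = runner.RunTest('sunspider')
    if retry is not None:
      # test_dispatcher would re-queue the same step, possibly on
      # another device.
      results, retry = runner.RunTest(retry)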