Chromium Code Reviews| OLD | NEW |
|---|---|
| 1 # Copyright 2013 The Chromium Authors. All rights reserved. | 1 # Copyright 2013 The Chromium Authors. All rights reserved. |
| 2 # Use of this source code is governed by a BSD-style license that can be | 2 # Use of this source code is governed by a BSD-style license that can be |
| 3 # found in the LICENSE file. | 3 # found in the LICENSE file. |
| 4 | 4 |
| 5 """Runs perf tests. | 5 """Runs perf tests. |
| 6 | 6 |
| 7 Our buildbot infrastructure requires each slave to run steps serially. | 7 Our buildbot infrastructure requires each slave to run steps serially. |
| 8 This is sub-optimal for android, where these steps can run independently on | 8 This is sub-optimal for android, where these steps can run independently on |
| 9 multiple connected devices. | 9 multiple connected devices. |
| 10 | 10 |
| (...skipping 47 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... | |
| 58 import threading | 58 import threading |
| 59 import time | 59 import time |
| 60 | 60 |
| 61 from pylib import cmd_helper | 61 from pylib import cmd_helper |
| 62 from pylib import constants | 62 from pylib import constants |
| 63 from pylib import forwarder | 63 from pylib import forwarder |
| 64 from pylib.base import base_test_result | 64 from pylib.base import base_test_result |
| 65 from pylib.base import base_test_runner | 65 from pylib.base import base_test_runner |
| 66 from pylib.device import device_errors | 66 from pylib.device import device_errors |
| 67 | 67 |
| 68 NUM_DEVICE_AFFINITIES = 8 | |
|
jbudorick
2015/05/23 01:06:49
What if the number of devices is higher than 8? I
luqui
2015/05/27 20:01:12
As far as I could tell there are no specs of devices
shatch
2015/05/28 13:59:17
This isn't a thing yet, but we did ask for some in
jbudorick
2015/05/28 14:03:16
Currently there aren't, and I don't think we have
| |
| 69 | |
| 68 | 70 |
| 69 def OutputJsonList(json_input, json_output): | 71 def OutputJsonList(json_input, json_output): |
| 70 with file(json_input, 'r') as i: | 72 with file(json_input, 'r') as i: |
| 71 all_steps = json.load(i) | 73 all_steps = json.load(i) |
| 72 step_names = all_steps['steps'].keys() | 74 step_names = all_steps['steps'].keys() |
| 73 with file(json_output, 'w') as o: | 75 with file(json_output, 'w') as o: |
| 74 o.write(json.dumps(step_names)) | 76 o.write(json.dumps(step_names)) |
| 75 return 0 | 77 return 0 |
| 76 | 78 |
| 77 | 79 |
| (...skipping 72 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... | |
| 150 if now - self._tick >= _HeartBeatLogger._PRINT_INTERVAL: | 152 if now - self._tick >= _HeartBeatLogger._PRINT_INTERVAL: |
| 151 self._tick = now | 153 self._tick = now |
| 152 print '--single-step output length %d' % self._len | 154 print '--single-step output length %d' % self._len |
| 153 sys.stdout.flush() | 155 sys.stdout.flush() |
| 154 | 156 |
| 155 def stop(self): | 157 def stop(self): |
| 156 self._stopped.set() | 158 self._stopped.set() |
| 157 | 159 |
| 158 | 160 |
| 159 class TestRunner(base_test_runner.BaseTestRunner): | 161 class TestRunner(base_test_runner.BaseTestRunner): |
| 160 def __init__(self, test_options, device, shard_index, max_shard, tests, | 162 def __init__(self, test_options, device, affinities, tests, flaky_tests): |
| 161 flaky_tests): | |
| 162 """A TestRunner instance runs a perf test on a single device. | 163 """A TestRunner instance runs a perf test on a single device. |
| 163 | 164 |
| 164 Args: | 165 Args: |
| 165 test_options: A PerfOptions object. | 166 test_options: A PerfOptions object. |
| 166 device: Device to run the tests. | 167 device: Device to run the tests. |
| 167 shard_index: the index of this device. | 168 affinities: the list of affinities to run on this shard. |
| 168 max_shards: the maximum shard index. | |
| 169 tests: a dict mapping test_name to command. | 169 tests: a dict mapping test_name to command. |
| 170 flaky_tests: a list of flaky test_name. | 170 flaky_tests: a list of flaky test_name. |
| 171 """ | 171 """ |
| 172 super(TestRunner, self).__init__(device, None, 'Release') | 172 super(TestRunner, self).__init__(device, None, 'Release') |
| 173 self._options = test_options | 173 self._options = test_options |
| 174 self._shard_index = shard_index | 174 self._affinities = affinities |
| 175 self._max_shard = max_shard | |
| 176 self._tests = tests | 175 self._tests = tests |
| 177 self._flaky_tests = flaky_tests | 176 self._flaky_tests = flaky_tests |
| 178 self._output_dir = None | 177 self._output_dir = None |
| 179 | 178 |
| 180 @staticmethod | 179 @staticmethod |
| 181 def _IsBetter(result): | 180 def _IsBetter(result): |
| 182 if result['actual_exit_code'] == 0: | 181 if result['actual_exit_code'] == 0: |
| 183 return True | 182 return True |
| 184 pickled = os.path.join(constants.PERF_OUTPUT_DIR, | 183 pickled = os.path.join(constants.PERF_OUTPUT_DIR, |
| 185 result['name']) | 184 result['name']) |
| 186 if not os.path.exists(pickled): | 185 if not os.path.exists(pickled): |
| 187 return True | 186 return True |
| 188 with file(pickled, 'r') as f: | 187 with file(pickled, 'r') as f: |
| 189 previous = pickle.loads(f.read()) | 188 previous = pickle.loads(f.read()) |
| 190 return result['actual_exit_code'] < previous['actual_exit_code'] | 189 return result['actual_exit_code'] < previous['actual_exit_code'] |
| 191 | 190 |
| 192 @staticmethod | 191 @staticmethod |
| 193 def _SaveResult(result): | 192 def _SaveResult(result): |
| 194 if TestRunner._IsBetter(result): | 193 if TestRunner._IsBetter(result): |
| 195 with file(os.path.join(constants.PERF_OUTPUT_DIR, | 194 with file(os.path.join(constants.PERF_OUTPUT_DIR, |
| 196 result['name']), 'w') as f: | 195 result['name']), 'w') as f: |
| 197 f.write(pickle.dumps(result)) | 196 f.write(pickle.dumps(result)) |
| 198 | 197 |
| 199 def _CheckDeviceAffinity(self, test_name): | 198 def _CheckDeviceAffinity(self, test_name): |
| 200 """Returns True if test_name has affinity for this shard.""" | 199 """Returns True if test_name has affinity for this shard.""" |
| 201 affinity = (self._tests['steps'][test_name]['device_affinity'] % | 200 affinity = self._tests['steps'][test_name]['device_affinity'] |
| 202 self._max_shard) | 201 assert 0 <= affinity and affinity < NUM_DEVICE_AFFINITIES, ( |
|
jbudorick
2015/05/23 01:06:49
This should be an exception. We generally avoid asserts
luqui
2015/05/27 20:01:12
Done.
| |
| 203 if self._shard_index == affinity: | 202 'Got out-of-range device affinity %s' % affinity) |
| 203 if affinity in self._affinities: | |
| 204 return True | 204 return True |
| 205 logging.info('Skipping %s on %s (affinity is %s, device is %s)', | 205 logging.info( |
| 206 test_name, self.device_serial, affinity, self._shard_index) | 206 'Skipping %s on %s (affinity is %s, allowed affinities are %s)', |
| 207 test_name, self.device_serial, affinity, self._affinities) | |
| 207 return False | 208 return False |
| 208 | 209 |
| 209 def _CleanupOutputDirectory(self): | 210 def _CleanupOutputDirectory(self): |
| 210 if self._output_dir: | 211 if self._output_dir: |
| 211 shutil.rmtree(self._output_dir, ignore_errors=True) | 212 shutil.rmtree(self._output_dir, ignore_errors=True) |
| 212 self._output_dir = None | 213 self._output_dir = None |
| 213 | 214 |
| 214 def _ReadChartjsonOutput(self): | 215 def _ReadChartjsonOutput(self): |
| 215 if not self._output_dir: | 216 if not self._output_dir: |
| 216 return '' | 217 return '' |
| (...skipping 110 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... | |
| 327 Returns: | 328 Returns: |
| 328 A tuple of (TestRunResults, retry). | 329 A tuple of (TestRunResults, retry). |
| 329 """ | 330 """ |
| 330 _, result_type = self._LaunchPerfTest(test_name) | 331 _, result_type = self._LaunchPerfTest(test_name) |
| 331 results = base_test_result.TestRunResults() | 332 results = base_test_result.TestRunResults() |
| 332 results.AddResult(base_test_result.BaseTestResult(test_name, result_type)) | 333 results.AddResult(base_test_result.BaseTestResult(test_name, result_type)) |
| 333 retry = None | 334 retry = None |
| 334 if not results.DidRunPass(): | 335 if not results.DidRunPass(): |
| 335 retry = test_name | 336 retry = test_name |
| 336 return results, retry | 337 return results, retry |
| OLD | NEW |