# Copyright 2016 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.

import io
import json
import logging
import os
import pickle
import shutil
import tempfile
import time
import zipfile

from devil.android import battery_utils
from devil.android import device_blacklist
from devil.android import device_errors
from devil.android import device_list
from devil.android import device_utils
from devil.android import forwarder
from devil.android.tools import device_recovery
from devil.android.tools import device_status
from devil.utils import cmd_helper
from devil.utils import parallelizer
from pylib import constants
from pylib.base import base_test_result
from pylib.constants import host_paths
from pylib.local.device import local_device_test_run


class TestShard(object):
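  """Runs a bucket of perf tests sequentially on a single assigned device."""
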
  def __init__(
      self, env, test_instance, device, index, tests, retries=3, timeout=None):
    logging.info('Create shard %s for device %s to run the following tests:',
                 index, device)
    for t in tests:
      logging.info('  %s', t)
    self._battery = battery_utils.BatteryUtils(device)
    self._device = device
    self._env = env
    self._index = index
    self._output_dir = None
    self._retries = retries
    self._test_instance = test_instance
    self._tests = tests
    self._timeout = timeout

  @local_device_test_run.handle_shard_failures
  def RunTestsOnShard(self):
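    """Runs each assigned test, recovering the device after any failure.

    Returns:
      A TestRunResults with one result per test.
    """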
    results = base_test_result.TestRunResults()
    for test in self._tests:
      tries_left = self._retries
      result_type = None
      while (result_type != base_test_result.ResultType.PASS
             and tries_left > 0):
        try:
          self._TestSetUp(test)
          result_type = self._RunSingleTest(test)
        except device_errors.CommandTimeoutError:
          result_type = base_test_result.ResultType.TIMEOUT
        except device_errors.CommandFailedError:
          logging.exception('Exception when executing %s.', test)
          result_type = base_test_result.ResultType.FAIL
        finally:
          self._TestTearDown()
        if result_type != base_test_result.ResultType.PASS:
          try:
            device_recovery.RecoverDevice(self._device, self._env.blacklist)
          except device_errors.CommandTimeoutError:
            logging.exception(
                'Device failed to recover after failing %s.', test)
        tries_left -= 1

      results.AddResult(base_test_result.BaseTestResult(test, result_type))
    return results

  def _TestSetUp(self, test):
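    """Checks device health, conditions the battery, and makes an output dir."""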
    if not self._device.IsOnline():
      msg = 'Device %s is unresponsive.' % str(self._device)
      raise device_errors.DeviceUnreachableError(msg)

    logging.info('Charge level: %s%%',
                 str(self._battery.GetBatteryInfo().get('level')))
    if self._test_instance.min_battery_level:
      self._battery.ChargeDeviceToLevel(self._test_instance.min_battery_level)

    logging.info('Temperature: %s (0.1 C)',
                 str(self._battery.GetBatteryInfo().get('temperature')))
    if self._test_instance.max_battery_temp:
      self._battery.LetBatteryCoolToTemperature(
          self._test_instance.max_battery_temp)

    if not self._device.IsScreenOn():
      self._device.SetScreen(True)

    if (self._test_instance.collect_chartjson_data
        or self._tests[test].get('archive_output_dir')):
      self._output_dir = tempfile.mkdtemp()

  def _RunSingleTest(self, test):
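    """Runs the test's command on the host and classifies the outcome.

    Returns:
      A base_test_result.ResultType value.
    """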
    self._test_instance.WriteBuildBotJson(self._output_dir)

    timeout = self._tests[test].get('timeout', self._timeout)
    cmd = self._CreateCmd(test)
    cwd = os.path.abspath(host_paths.DIR_SOURCE_ROOT)

    logging.debug("Running %s with command '%s' on shard %d with timeout %d",
                  test, cmd, self._index, timeout)

    try:
      start_time = time.time()
      exit_code, output = cmd_helper.GetCmdStatusAndOutputWithTimeout(
          cmd, timeout, cwd=cwd, shell=True)
      end_time = time.time()
      json_output = self._test_instance.ReadChartjsonOutput(self._output_dir)
      if exit_code == 0:
        result_type = base_test_result.ResultType.PASS
      else:
        result_type = base_test_result.ResultType.FAIL
    except cmd_helper.TimeoutError as e:
      end_time = time.time()
      exit_code = -1
      output = e.output
      json_output = ''
      result_type = base_test_result.ResultType.TIMEOUT

    return self._ProcessTestResult(test, cmd, start_time, end_time, exit_code,
                                   output, json_output, result_type)

  def _CreateCmd(self, test):
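    """Builds the shell command for a test, bound to this shard's device."""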
    cmd = '%s --device %s' % (self._tests[test]['cmd'], str(self._device))
    if self._output_dir:
      cmd = cmd + ' --output-dir=%s' % self._output_dir
    if self._test_instance.dry_run:
      cmd = 'echo %s' % cmd
    return cmd

  def _ProcessTestResult(self, test, cmd, start_time, end_time, exit_code,
                         output, json_output, result_type):
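    """Logs the outcome, persists the result to disk, and returns its type."""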
    if exit_code is None:
      exit_code = -1
    logging.info('%s : exit_code=%d in %d secs on device %s',
                 test, exit_code, end_time - start_time,
                 str(self._device))

    actual_exit_code = exit_code
    if (self._test_instance.flaky_steps
        and test in self._test_instance.flaky_steps):
      exit_code = 0
    archive_bytes = (self._ArchiveOutputDir()
                     if self._tests[test].get('archive_output_dir')
                     else None)
    persisted_result = {
        'name': test,
        'output': [output],
        'chartjson': json_output,
        'archive_bytes': archive_bytes,
        'exit_code': exit_code,
        'actual_exit_code': actual_exit_code,
        'result_type': result_type,
        'start_time': start_time,
        'end_time': end_time,
        'total_time': end_time - start_time,
        'device': str(self._device),
        'cmd': cmd,
    }
    self._SaveResult(persisted_result)
    return result_type

  def _ArchiveOutputDir(self):
    """Archive all files in the output dir, and return as compressed bytes."""
    with io.BytesIO() as archive:
      with zipfile.ZipFile(archive, 'w', zipfile.ZIP_DEFLATED) as contents:
        num_files = 0
        for absdir, _, files in os.walk(self._output_dir):
          reldir = os.path.relpath(absdir, self._output_dir)
          for filename in files:
            src_path = os.path.join(absdir, filename)
            # We use normpath to turn './file.txt' into just 'file.txt'.
            dst_path = os.path.normpath(os.path.join(reldir, filename))
            contents.write(src_path, dst_path)
            num_files += 1
      if num_files:
        logging.info('%d files in the output dir were archived.', num_files)
      else:
        logging.warning('No files in the output dir. Archive is empty.')
      return archive.getvalue()

  @staticmethod
  def _SaveResult(result):
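    """Pickles the result, appending its output to any previous attempts."""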
    pickled = os.path.join(constants.PERF_OUTPUT_DIR, result['name'])
    if os.path.exists(pickled):
      with open(pickled, 'rb') as f:
        previous = pickle.load(f)
        result['output'] = previous['output'] + result['output']
    with open(pickled, 'wb') as f:
      pickle.dump(result, f)

  def _TestTearDown(self):
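    """Deletes the output dir and unmaps any forwarded device ports."""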
    if self._output_dir:
      shutil.rmtree(self._output_dir, ignore_errors=True)
      self._output_dir = None
    try:
      logging.info('Unmapping device ports for %s.', self._device)
      forwarder.Forwarder.UnmapAllDevicePorts(self._device)
    except Exception:  # pylint: disable=broad-except
      logging.exception('Exception when resetting ports.')


class LocalDevicePerfTestRun(local_device_test_run.LocalDeviceTestRun):
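  """Runs perf tests locally, bucketing them by per-test device affinity."""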

  _DEFAULT_TIMEOUT = 60 * 60
  _CONFIG_VERSION = 1

  def __init__(self, env, test_instance):
    super(LocalDevicePerfTestRun, self).__init__(env, test_instance)
    self._devices = None
    self._env = env
    self._test_buckets = []
    self._test_instance = test_instance
    self._timeout = None if test_instance.no_timeout else self._DEFAULT_TIMEOUT

  def SetUp(self):
    self._devices = self._GetAllDevices(self._env.devices,
                                        self._test_instance.known_devices_file)

    if os.path.exists(constants.PERF_OUTPUT_DIR):
      shutil.rmtree(constants.PERF_OUTPUT_DIR)
    os.makedirs(constants.PERF_OUTPUT_DIR)

  def TearDown(self):
    pass

  def _GetStepsFromDict(self):
    # By the time this is called, exactly one of single_step or steps is set.
    if self._test_instance.single_step:
      return {
          'version': self._CONFIG_VERSION,
          'steps': {
              'single_step': {
                  'device_affinity': 0,
                  'cmd': self._test_instance.single_step
              },
          }
      }
    if self._test_instance.steps:
      with open(self._test_instance.steps, 'r') as f:
        steps = json.load(f)
      if steps['version'] != self._CONFIG_VERSION:
        raise TestDictVersionError(
            'Version is expected to be %d but was %d' % (self._CONFIG_VERSION,
                                                         steps['version']))
      return steps
    raise PerfTestRunGetStepsError(
        'Neither single_step nor steps is set in test_instance.')

  def _SplitTestsByAffinity(self):
    # This splits tests by their device affinity so that the same tests always
    # run on the same devices. This is important for perf tests since different
    # devices might yield slightly different performance results.
    test_dict = self._GetStepsFromDict()
    for test, test_config in test_dict['steps'].iteritems():
      try:
        affinity = test_config['device_affinity']
        while len(self._test_buckets) < affinity + 1:
          self._test_buckets.append({})
        self._test_buckets[affinity][test] = test_config
      except KeyError:
        logging.exception(
            'Test config for %s is bad.\n Config:%s', test, str(test_config))

  @staticmethod
  def _GetAllDevices(active_devices, devices_path):
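    """Returns a sorted device list, preferring the known devices file."""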
    try:
      if devices_path:
        devices = [device_utils.DeviceUtils(s)
                   for s in device_list.GetPersistentDeviceList(devices_path)]
        if not devices and active_devices:
          logging.warning('%s is empty. Falling back to active devices.',
                          devices_path)
          devices = active_devices
      else:
        logging.warning('No known devices file path was passed. For device '
                        'affinity to work properly, one must be passed.')
        devices = active_devices
    except IOError as e:
      logging.error('Unable to find %s [%s]', devices_path, e)
      devices = active_devices
    return sorted(devices)

  # override
  def RunTests(self):
    # Affinitize the tests.
    self._SplitTestsByAffinity()
    if not self._test_buckets:
      raise local_device_test_run.NoTestsError()

    blacklist = (device_blacklist.Blacklist(self._env.blacklist)
                 if self._env.blacklist
                 else None)

    def run_perf_tests(shard_id):
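      # Runs one shard's tests on its affinitized device; blacklisted
      # devices get an empty result list instead of a shard.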
      if device_status.IsBlacklisted(str(self._devices[shard_id]), blacklist):
        logging.warning('Device %s is not active. Will not create shard %s.',
                        str(self._devices[shard_id]), shard_id)
        return []
      s = TestShard(self._env, self._test_instance, self._devices[shard_id],
                    shard_id, self._test_buckets[shard_id],
                    retries=self._env.max_tries, timeout=self._timeout)
      return s.RunTestsOnShard()

    device_indices = range(min(len(self._devices), len(self._test_buckets)))
    shards = parallelizer.Parallelizer(device_indices).pMap(run_perf_tests)
    return shards.pGet(self._timeout)

  # override
  def TestPackage(self):
    return 'perf'

  # override
  def _CreateShards(self, _tests):
    raise NotImplementedError

  # override
  def _GetTests(self):
    return self._test_buckets

  # override
  def _RunTest(self, _device, _test):
    raise NotImplementedError

  # override
  def _ShouldShard(self):
    return False


class OutputJsonList(LocalDevicePerfTestRun):
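  """Test run that only invokes the test instance's OutputJsonList step."""
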
  # override
  def SetUp(self):
    pass

  # override
  def RunTests(self):
    result_type = self._test_instance.OutputJsonList()
    result = base_test_result.TestRunResults()
    result.AddResult(
        base_test_result.BaseTestResult('OutputJsonList', result_type))
    return [result]

  # override
  def _CreateShards(self, _tests):
    raise NotImplementedError

  # override
  def _RunTest(self, _device, _test):
    raise NotImplementedError


class PrintStep(LocalDevicePerfTestRun):
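  """Test run that only invokes the test instance's PrintTestOutput step."""
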
  # override
  def SetUp(self):
    pass

  # override
  def RunTests(self):
    result_type = self._test_instance.PrintTestOutput()
    result = base_test_result.TestRunResults()
    result.AddResult(
        base_test_result.BaseTestResult('PrintStep', result_type))
    return [result]

  # override
  def _CreateShards(self, _tests):
    raise NotImplementedError

  # override
  def _RunTest(self, _device, _test):
    raise NotImplementedError


class TestDictVersionError(Exception):
  pass


class PerfTestRunGetStepsError(Exception):
  pass