1 # Copyright 2016 The Chromium Authors. All rights reserved. | |
2 # Use of this source code is governed by a BSD-style license that can be | |
3 # found in the LICENSE file. | |
4 | |
5 import io | |
6 import json | |
7 import logging | |
8 import os | |
9 import pickle | |
10 import shutil | |
11 import tempfile | |
12 import time | |
13 import zipfile | |
14 | |
15 from devil.android import battery_utils | |
16 from devil.android import device_blacklist | |
17 from devil.android import device_errors | |
18 from devil.android import device_list | |
19 from devil.android import device_utils | |
20 from devil.android import forwarder | |
21 from devil.android.tools import device_recovery | |
22 from devil.android.tools import device_status | |
23 from devil.utils import cmd_helper | |
24 from devil.utils import parallelizer | |
25 from pylib import constants | |
26 from pylib.base import base_test_result | |
27 from pylib.constants import host_paths | |
28 from pylib.local.device import local_device_test_run | |
29 | |
30 | |
31 class TestShard(object): | |
32 def __init__( | |
33 self, env, test_instance, device, index, tests, retries=3, timeout=None): | |
34 logging.info('Create shard %s for device %s to run the following tests:', | |
35 index, device) | |
36 for t in tests: | |
37 logging.info(' %s', t) | |
38 self._battery = battery_utils.BatteryUtils(device) | |
39 self._device = device | |
40 self._env = env | |
41 self._index = index | |
42 self._output_dir = None | |
43 self._retries = retries | |
44 self._test_instance = test_instance | |
45 self._tests = tests | |
46 self._timeout = timeout | |
47 | |
48 @local_device_test_run.handle_shard_failures | |
49 def RunTestsOnShard(self): | |
50 results = base_test_result.TestRunResults() | |
51 for test in self._tests: | |
52 tries_left = self._retries | |
53 result_type = None | |
54 while (result_type != base_test_result.ResultType.PASS | |
55 and tries_left > 0): | |
56 try: | |
57 self._TestSetUp(test) | |
58 result_type = self._RunSingleTest(test) | |
59 except device_errors.CommandTimeoutError: | |
60 result_type = base_test_result.ResultType.TIMEOUT | |
61 except device_errors.CommandFailedError: | |
62 logging.exception('Exception when executing %s.', test) | |
63 result_type = base_test_result.ResultType.FAIL | |
64 finally: | |
65 self._TestTearDown() | |
66 if result_type != base_test_result.ResultType.PASS: | |
67 try: | |
68 device_recovery.RecoverDevice(self._device, self._env.blacklist) | |
69 except device_errors.CommandTimeoutError: | |
70 logging.exception( | |
71 'Device failed to recover after failing %s.', test) | |
72 tries_left = tries_left - 1 | |
73 | |
74 results.AddResult(base_test_result.BaseTestResult(test, result_type)) | |
75 return results | |
76 | |
77 def _TestSetUp(self, test): | |
78 if not self._device.IsOnline(): | |
79 msg = 'Device %s is unresponsive.' % str(self._device) | |
80 raise device_errors.DeviceUnreachableError(msg) | |
81 | |
82 logging.info('Charge level: %s%%', | |
83 str(self._battery.GetBatteryInfo().get('level'))) | |
84 if self._test_instance.min_battery_level: | |
85 self._battery.ChargeDeviceToLevel(self._test_instance.min_battery_level) | |
86 | |
87 logging.info('temperature: %s (0.1 C)', | |
88 str(self._battery.GetBatteryInfo().get('temperature'))) | |
89 if self._test_instance.max_battery_temp: | |
90 self._battery.LetBatteryCoolToTemperature( | |
91 self._test_instance.max_battery_temp) | |
92 | |
93 if not self._device.IsScreenOn(): | |
94 self._device.SetScreen(True) | |
95 | |
96 if (self._test_instance.collect_chartjson_data | |
97 or self._tests[test].get('archive_output_dir')): | |
98 self._output_dir = tempfile.mkdtemp() | |
99 | |
100 def _RunSingleTest(self, test): | |
101 self._test_instance.WriteBuildBotJson(self._output_dir) | |
102 | |
103 timeout = self._tests[test].get('timeout', self._timeout) | |
104 cmd = self._CreateCmd(test) | |
105 cwd = os.path.abspath(host_paths.DIR_SOURCE_ROOT) | |
106 | |
107 logging.debug("Running %s with command '%s' on shard %d with timeout %d", | |
108 test, cmd, self._index, timeout) | |
109 | |
110 try: | |
111 start_time = time.time() | |
112 exit_code, output = cmd_helper.GetCmdStatusAndOutputWithTimeout( | |
113 cmd, timeout, cwd=cwd, shell=True) | |
114 end_time = time.time() | |
115 json_output = self._test_instance.ReadChartjsonOutput(self._output_dir) | |
116 if exit_code == 0: | |
117 result_type = base_test_result.ResultType.PASS | |
118 else: | |
119 result_type = base_test_result.ResultType.FAIL | |
120 except cmd_helper.TimeoutError as e: | |
121 end_time = time.time() | |
122 exit_code = -1 | |
123 output = e.output | |
124 json_output = '' | |
125 result_type = base_test_result.ResultType.TIMEOUT | |
126 | |
127 return self._ProcessTestResult(test, cmd, start_time, end_time, exit_code, | |
128 output, json_output, result_type) | |
129 | |
130 def _CreateCmd(self, test): | |
131 cmd = '%s --device %s' % (self._tests[test]['cmd'], str(self._device)) | |
132 if self._output_dir: | |
133 cmd = cmd + ' --output-dir=%s' % self._output_dir | |
134 if self._test_instance.dry_run: | |
135 cmd = 'echo %s' % cmd | |
136 return cmd | |
137 | |
138 def _ProcessTestResult(self, test, cmd, start_time, end_time, exit_code, | |
139 output, json_output, result_type): | |
140 if exit_code is None: | |
141 exit_code = -1 | |
142 logging.info('%s : exit_code=%d in %d secs on device %s', | |
143 test, exit_code, end_time - start_time, | |
144 str(self._device)) | |
145 | |
146 actual_exit_code = exit_code | |
147 if (self._test_instance.flaky_steps | |
148 and test in self._test_instance.flaky_steps): | |
149 exit_code = 0 | |
150 archive_bytes = (self._ArchiveOutputDir() | |
151 if self._tests[test].get('archive_output_dir') | |
152 else None) | |
153 persisted_result = { | |
154 'name': test, | |
155 'output': [output], | |
156 'chartjson': json_output, | |
157 'archive_bytes': archive_bytes, | |
158 'exit_code': exit_code, | |
159 'actual_exit_code': actual_exit_code, | |
160 'result_type': result_type, | |
161 'start_time': start_time, | |
162 'end_time': end_time, | |
163 'total_time': end_time - start_time, | |
164 'device': str(self._device), | |
165 'cmd': cmd, | |
166 } | |
167 self._SaveResult(persisted_result) | |
168 return result_type | |
169 | |
170 def _ArchiveOutputDir(self): | |
171 """Archive all files in the output dir, and return as compressed bytes.""" | |
172 with io.BytesIO() as archive: | |
173 with zipfile.ZipFile(archive, 'w', zipfile.ZIP_DEFLATED) as contents: | |
174 num_files = 0 | |
175 for absdir, _, files in os.walk(self._output_dir): | |
176 reldir = os.path.relpath(absdir, self._output_dir) | |
177 for filename in files: | |
178 src_path = os.path.join(absdir, filename) | |
179 # We use normpath to turn './file.txt' into just 'file.txt'. | |
180 dst_path = os.path.normpath(os.path.join(reldir, filename)) | |
181 contents.write(src_path, dst_path) | |
182 num_files += 1 | |
183 if num_files: | |
184 logging.info('%d files in the output dir were archived.', num_files) | |
185 else: | |
186 logging.warning('No files in the output dir. Archive is empty.') | |
187 return archive.getvalue() | |
188 | |
189 @staticmethod | |
190 def _SaveResult(result): | |
191 pickled = os.path.join(constants.PERF_OUTPUT_DIR, result['name']) | |
192 if os.path.exists(pickled): | |
193 with file(pickled, 'r') as f: | |
194 previous = pickle.loads(f.read()) | |
195 result['output'] = previous['output'] + result['output'] | |
196 with file(pickled, 'w') as f: | |
197 f.write(pickle.dumps(result)) | |
198 | |
199 def _TestTearDown(self): | |
200 if self._output_dir: | |
201 shutil.rmtree(self._output_dir, ignore_errors=True) | |
202 self._output_dir = None | |
203 try: | |
204 logging.info('Unmapping device ports for %s.', self._device) | |
205 forwarder.Forwarder.UnmapAllDevicePorts(self._device) | |
206 except Exception: # pylint: disable=broad-except | |
207 logging.exception('Exception when resetting ports.') | |
208 | |
209 | |
210 class LocalDevicePerfTestRun(local_device_test_run.LocalDeviceTestRun): | |
211 def __init__(self, env, test_instance): | |
212 super(LocalDevicePerfTestRun, self).__init__(env, test_instance) | |
213 self._devices = None | |
214 self._env = env | |
215 self._test_buckets = [] | |
216 self._test_instance = test_instance | |
217 self._timeout = None if test_instance.no_timeout else 60 * 60 | |
218 | |
mikecase 2016/07/12 18:33:14: nit: consider factoring out 60 * 60 into DEFAULT_P
rnephew 2016/07/12 18:55:26: Done.
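A minimal sketch of the suggested refactor (the constant name below is assumed, since the review comment is truncated):

  # Module-level default timeout, in seconds (name assumed for illustration).
  _DEFAULT_PERF_TIMEOUT = 60 * 60

  # In LocalDevicePerfTestRun.__init__:
  self._timeout = None if test_instance.no_timeout else _DEFAULT_PERF_TIMEOUT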
219 def SetUp(self): | |
220 self._devices = self._GetAllDevices(self._env.devices, | |
221 self._test_instance.known_devices_file) | |
222 | |
223 if os.path.exists(constants.PERF_OUTPUT_DIR): | |
224 shutil.rmtree(constants.PERF_OUTPUT_DIR) | |
225 os.makedirs(constants.PERF_OUTPUT_DIR) | |
226 | |
227 def TearDown(self): | |
228 pass | |
229 | |
230 def _GetStepsFromDict(self): | |
231 # From where this is called one of these two must be set. | |
mikecase 2016/07/12 18:33:14: I think it would be clearer if it was written....
232 if self._test_instance.single_step: | |
233 return { | |
234 'version': 1, | |
235 'steps': { | |
236 'single_step': { | |
237 'device_affinity': 0, | |
238 'cmd': self._test_instance.single_step | |
239 }, | |
240 } | |
241 } | |
242 if self._test_instance.steps: | |
243 with file(self._test_instance.steps, 'r') as f: | |
244 steps = json.load(f) | |
245 if steps['version'] != 1: | |
jbudorick 2016/07/12 18:45:10: Nit: constify this version since you use it here a
rnephew 2016/07/12 21:05:47: Done.
246 raise TestDictVersionError( | |
247 'Version is expected to be %d but was %d' % (1, steps['version'])) | |
248 return steps | |
249 raise PerfTestRunGetStepsError( | |
250 'Neither single_step or steps set in test_instance.') | |
251 | |
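A minimal sketch of the version constant suggested in the comment above (the constant name is assumed); the same constant could also replace the literal 1 used in the single_step dict:

  # Module-level constant for the supported test-dict version (name assumed).
  _TEST_DICT_VERSION = 1

  # In _GetStepsFromDict:
  if steps['version'] != _TEST_DICT_VERSION:
    raise TestDictVersionError(
        'Version is expected to be %d but was %d'
        % (_TEST_DICT_VERSION, steps['version']))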
252 def _SplitTestsByAffinity(self): | |
253 # This splits tests by their device affinity; so that the same tests always | |
jbudorick 2016/07/12 18:45:10: nit: no ;
rnephew 2016/07/12 21:05:47: Done.
254 # run on the same devices. This is important for perf tests since different | |
255 # devices might yield slightly different performance results. | |
256 test_dict = self._GetStepsFromDict() | |
257 for test, test_config in test_dict['steps'].iteritems(): | |
258 try: | |
259 affinity = test_config['device_affinity'] | |
260 if len(self._test_buckets) < affinity + 1: | |
261 while len(self._test_buckets) != affinity + 1: | |
262 self._test_buckets.append({}) | |
263 self._test_buckets[affinity][test] = test_config | |
264 except KeyError: | |
265 logging.exception( | |
266 'Test config for %s is bad.\n Config:%s', test, str(test_config)) | |
267 | |
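For illustration, a hypothetical steps dict shows how _SplitTestsByAffinity buckets tests by device_affinity (test names and commands are invented):

  # Hypothetical input (names and commands invented for illustration).
  steps = {
      'version': 1,
      'steps': {
          'test_a': {'device_affinity': 0, 'cmd': './run_test_a'},
          'test_b': {'device_affinity': 1, 'cmd': './run_test_b'},
          'test_c': {'device_affinity': 0, 'cmd': './run_test_c'},
      },
  }
  # After _SplitTestsByAffinity, the buckets would be:
  #   self._test_buckets[0] == {'test_a': {...}, 'test_c': {...}}
  #   self._test_buckets[1] == {'test_b': {...}}
  # so a given test always runs on the device at its affinity index.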
268 @staticmethod | |
269 def _GetAllDevices(active_devices, devices_path): | |
270 try: | |
271 if devices_path: | |
272 devices = [device_utils.DeviceUtils(s) | |
273 for s in device_list.GetPersistentDeviceList(devices_path)] | |
274 if not devices and active_devices: | |
275 logging.warning('%s is empty. Falling back to active devices.', | |
276 devices_path) | |
277 devices = active_devices | |
278 else: | |
279 logging.warning('Known devices file path not being passed. For device ' | |
280 'affinity to work properly, it must be passed.') | |
281 devices = active_devices | |
282 except IOError as e: | |
283 logging.error('Unable to find %s [%s]', devices_path, e) | |
284 devices = active_devices | |
285 return sorted(devices) | |
286 | |
287 def RunTests(self): | |
mikecase 2016/07/12 18:33:14: #override?
rnephew 2016/07/12 18:55:26: Done.
288 # Affinitize the tests. | |
289 self._SplitTestsByAffinity() | |
290 if not self._test_buckets: | |
291 raise local_device_test_run.NoTestsError() | |
292 | |
293 blacklist = (device_blacklist.Blacklist(self._env.blacklist) | |
294 if self._env.blacklist | |
295 else None) | |
296 | |
297 def run_perf_tests(x): | |
mikecase 2016/07/12 18:33:14: nit: would prefer shard_index or shard_id or affin
rnephew 2016/07/12 18:55:26: Done.
298 if device_status.IsBlacklisted(str(self._devices[x]), blacklist): | |
299 logging.warning('Device %s is not active. Will not create shard %s.', | |
300 str(self._devices[x]), x) | |
301 return [] | |
302 s = TestShard(self._env, self._test_instance, self._devices[x], x, | |
303 self._test_buckets[x], retries=self._env.max_tries, | |
304 timeout=self._timeout) | |
305 return s.RunTestsOnShard() | |
306 | |
307 device_indices = range(min(len(self._devices), len(self._test_buckets))) | |
308 shards = parallelizer.Parallelizer(device_indices).pMap(run_perf_tests) | |
309 return shards.pGet(self._timeout) | |
310 | |
311 # override | |
312 def TestPackage(self): | |
313 return 'perf' | |
314 | |
315 # override | |
316 def _CreateShards(self, _tests): | |
317 raise NotImplementedError | |
318 | |
319 # override | |
320 def _GetTests(self): | |
321 return self._test_buckets | |
322 | |
323 # override | |
324 def _RunTest(self, _device, _test): | |
325 raise NotImplementedError | |
326 | |
327 # override | |
328 def _ShouldShard(self): | |
329 return False | |
330 | |
331 | |
332 class OutputJsonList(LocalDevicePerfTestRun): | |
333 def SetUp(self): | |
mikecase 2016/07/12 18:33:14: nit: override
rnephew 2016/07/12 18:55:26: Done.
334 pass | |
335 | |
336 def RunTests(self): | |
mikecase 2016/07/12 18:33:14: nit: #override?
rnephew 2016/07/12 18:55:26: Done.
337 result_type = self._test_instance.OutputJsonList() | |
338 result = base_test_result.TestRunResults() | |
339 result.AddResult( | |
340 base_test_result.BaseTestResult('OutputJsonList', result_type)) | |
341 return [result] | |
342 | |
343 # override | |
344 def _CreateShards(self, _tests): | |
345 raise NotImplementedError | |
346 | |
347 # override | |
348 def _RunTest(self, _device, _test): | |
349 raise NotImplementedError | |
350 | |
351 | |
352 class PrintStep(LocalDevicePerfTestRun): | |
353 def SetUp(self): | |
mikecase 2016/07/12 18:33:14: nit: override
rnephew 2016/07/12 18:55:26: Done.
354 pass | |
355 | |
356 def RunTests(self): | |
mikecase 2016/07/12 18:33:14: nit: #override?
rnephew 2016/07/12 18:55:26: Done.
357 result_type = self._test_instance.PrintTestOutput() | |
358 result = base_test_result.TestRunResults() | |
359 result.AddResult( | |
360 base_test_result.BaseTestResult('PrintStep', result_type)) | |
361 return [result] | |
362 | |
363 # override | |
364 def _CreateShards(self, _tests): | |
365 raise NotImplementedError | |
366 | |
367 # override | |
368 def _RunTest(self, _device, _test): | |
369 raise NotImplementedError | |
370 | |
371 | |
372 class TestDictVersionError(Exception): | |
373 pass | |
374 | |
375 class PerfTestRunGetStepsError(Exception): | |
376 pass | |