# Copyright 2016 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.

import io
import itertools
import json
import logging
import os
import pickle
import shutil
import tempfile
import time
import zipfile

from devil.android import battery_utils
from devil.android import device_blacklist
from devil.android import device_errors
from devil.android import device_list
from devil.android import device_utils
from devil.android import forwarder
from devil.android.tools import device_recovery
from devil.android.tools import device_status
from devil.utils import cmd_helper
from devil.utils import parallelizer
from devil.utils import watchdog_timer
from pylib import constants
from pylib.base import base_test_result
from pylib.constants import host_paths
from pylib.local.device import local_device_test_run


class TestShard(object):
  def __init__(self, env, test_instance, device, index, tests, watcher=None,
               retries=3, timeout=None):
    logging.info('Create shard %s for device %s to run the following tests:',
                 index, device)
    for t in tests:
      logging.info('  %s', t)
    self._battery = battery_utils.BatteryUtils(device)
    self._device = device
    self._env = env
    self._index = index
    self._output_dir = None
    self._results = []
    self._retries = retries
    self._test_instance = test_instance
    self._tests = tests
    self._timeout = timeout
    self._watcher = watcher

  @local_device_test_run.handle_shard_failures
  def RunTestsOnShard(self):
    for test in self._tests:
      try:
        tries_left = self._retries
        result_type = None
        while (result_type != base_test_result.ResultType.PASS
               and tries_left > 0):
          try:
            self._TestSetUp(test)
            result_type = self._RunSingleTest(test)
          except Exception:  # pylint: disable=broad-except
jbudorick, 2016/07/01 14:20:10: Do we need to catch Exception? Can we catch something more specific?
rnephew (Reviews Here), 2016/07/01 22:09:32: I want it to catch just about anything so that it …
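For reference, a narrower handler along the lines the thread suggests might look like the sketch below. It reuses the devil exception types already imported in this file and keeps the same recovery-and-retry flow; treat it as an illustration, not what this patch does.

    try:
      self._TestSetUp(test)
      result_type = self._RunSingleTest(test)
    except (device_errors.CommandFailedError,
            device_errors.CommandTimeoutError,
            device_errors.DeviceUnreachableError):
      # Known device-side failures; the finally block below still runs
      # recovery and the surrounding while loop still retries.
      logging.exception('Device error when executing %s.', test)
      result_type = base_test_result.ResultType.FAIL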
            logging.exception('Exception when executing %s.', test)
            result_type = base_test_result.ResultType.FAIL
          finally:
            if result_type != base_test_result.ResultType.PASS:
jbudorick, 2016/07/01 14:20:10: Is this the right condition to attempt device recovery?
rnephew (Reviews Here), 2016/07/01 22:09:32: I think that would not catch the issue where the f…
jbudorick, 2016/07/06 19:12:09: Acknowledged. Fine with this for now, but we should …
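One narrower trigger, for illustration only: attempt recovery when the device itself has stopped responding, and let a plain test failure on a healthy device go straight back into the retry loop. A sketch, not what the patch settled on:

    if result_type != base_test_result.ResultType.PASS:
      if not self._device.IsOnline():
        # Recover only when the device has actually dropped offline.
        device_recovery.RecoverDevice(self._device, self._env.blacklist)
      tries_left = tries_left - 1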
              try:
                device_recovery.RecoverDevice(self._device, self._env.blacklist)
              except device_errors.CommandTimeoutError:
                logging.exception(
                    'Device failed to recover after failing %s.', test)
              tries_left = tries_left - 1
        self._TestTearDown()
        result = base_test_result.TestRunResults()
        result.AddResult(base_test_result.BaseTestResult(test, result_type))
        self._results.append(result)
      finally:
        if self._output_dir:
jbudorick, 2016/07/01 14:20:10: self._output_dir seems to be handled pretty strangely …
rnephew (Reviews Here), 2016/07/01 22:09:32: Done.
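One tidier lifecycle, sketched under the assumption that _RunSingleTest grows a hypothetical output_dir parameter (the actual follow-up may differ): create the temp dir right before the run and always remove it afterwards, so no cleanup is owed across loop iterations.

    output_dir = tempfile.mkdtemp()
    try:
      result_type = self._RunSingleTest(test, output_dir)
    finally:
      shutil.rmtree(output_dir, ignore_errors=True)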
          shutil.rmtree(self._output_dir, ignore_errors=True)
          self._output_dir = None
    return self._results

  def _TestSetUp(self, test):
    if self._watcher:
      self._watcher.Reset()

    logging.info('Charge level: %s%%',
                 str(self._battery.GetBatteryInfo().get('level')))
    if self._test_instance.min_battery_level:
      self._battery.ChargeDeviceToLevel(self._test_instance.min_battery_level)

    logging.info('temperature: %s (0.1 C)',
                 str(self._battery.GetBatteryInfo().get('temperature')))
    if self._test_instance.max_battery_temp:
      self._battery.LetBatteryCoolToTemperature(
          self._test_instance.max_battery_temp)

    if not self._device.IsScreenOn():
      self._device.SetScreen(True)

    if not self._device.IsOnline():
jbudorick, 2016/07/01 14:20:10: Wouldn't we run into issues earlier in this function …?
rnephew (Reviews Here), 2016/07/01 22:09:32: Done.
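Per the comment, the responsiveness check could move to the top of _TestSetUp so the battery and screen calls above never touch an offline device. A minimal sketch:

  def _TestSetUp(self, test):
    # Fail fast before any battery or screen interaction with the device.
    if not self._device.IsOnline():
      raise device_errors.DeviceUnreachableError(
          'Device %s is unresponsive.' % str(self._device))
    # ... battery, temperature, and screen setup continue as above ...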
      msg = 'Device %s is unresponsive.' % str(self._device)
      raise device_errors.DeviceUnreachableError(msg)
    if self._output_dir:
jbudorick, 2016/07/01 14:20:10: Following from the above, I don't think this should …
rnephew (Reviews Here), 2016/07/01 22:09:31: Done.
      shutil.rmtree(self._output_dir)
    if (self._test_instance.collect_chartjson_data
        or self._tests[test].get('archive_output_dir')):
      self._output_dir = tempfile.mkdtemp()
    if self._watcher:
jbudorick, 2016/07/01 14:20:10: self._watcher gets reset twice in here? Also, the …
rnephew (Reviews Here), 2016/07/01 22:09:32: Done.
      self._watcher.Reset()

  def _RunSingleTest(self, test):
    logging.info('Running %s on shard %d', test, self._index)
    timeout = (
        None if self._test_instance.no_timeout
jbudorick, 2016/07/01 14:20:10: I *think* this is redundant, as the test run's already …
rnephew (Reviews Here), 2016/07/01 22:09:32: Done.
        else self._tests[test].get('timeout', self._timeout))
    logging.info('Timeout for %s test: %s', test, timeout)

    cmd = self._CreateCmd(test)
    self._test_instance.WriteBuildBotJson(self._output_dir)
    cwd = os.path.abspath(host_paths.DIR_SOURCE_ROOT)

    try:
      logging.debug("Running test with command '%s'", cmd)
      start_time = time.time()
      exit_code, output = cmd_helper.GetCmdStatusAndOutputWithTimeout(
          cmd, timeout, cwd=cwd, shell=True)
      end_time = time.time()
      json_output = self._test_instance.ReadChartjsonOutput(self._output_dir)
    except cmd_helper.TimeoutError as e:
jbudorick, 2016/07/01 14:20:10: It'd be good if this could be propagated down to r…
rnephew (Reviews Here), 2016/07/01 22:09:31: Done.
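To surface the timeout in the reported result instead of folding it into a generic failure, something like the sketch below could work. It assumes base_test_result.ResultType defines a TIMEOUT member and threads a hypothetical timed_out flag into _ProcessTestResult; neither detail is confirmed by this patch.

    except cmd_helper.TimeoutError as e:
      end_time = time.time()
      exit_code = -1
      output = e.output
      json_output = ''
      timed_out = True  # hypothetical flag passed on to _ProcessTestResult

    # In _ProcessTestResult, branch on the flag before the exit-code check:
    if timed_out:
      result_type = base_test_result.ResultType.TIMEOUT
    elif exit_code == 0:
      result_type = base_test_result.ResultType.PASS
    else:
      result_type = base_test_result.ResultType.FAIL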
      end_time = time.time()
      exit_code = -1
      output = e.output
      json_output = ''

    return self._ProcessTestResult(
        test, cmd, start_time, end_time, exit_code, output, json_output)

  def _CreateCmd(self, test):
    cmd = '%s --device %s' % (self._tests[test]['cmd'], str(self._device))
    if self._output_dir:
      cmd = cmd + ' --output-dir=%s' % self._output_dir
    if self._test_instance.dry_run:
      cmd = 'echo %s' % cmd
    return cmd
jbudorick, 2016/07/01 14:20:10: nit: -1 line
rnephew (Reviews Here), 2016/07/01 22:09:32: Done.

  def _ProcessTestResult(
      self, test, cmd, start_time, end_time, exit_code, output, json_output):
    if exit_code is None:
      exit_code = -1
    logging.info('%s : exit_code=%d in %d secs on device %s',
                 test, exit_code, end_time - start_time,
                 str(self._device))
    if exit_code == 0:
      result_type = base_test_result.ResultType.PASS
    else:
      result_type = base_test_result.ResultType.FAIL
    actual_exit_code = exit_code
    if (self._test_instance.flaky_steps
        and test in self._test_instance.flaky_steps):
      exit_code = 0
    archive_bytes = (self._ArchiveOutputDir()
                     if self._tests[test].get('archive_output_dir')
                     else None)
    persisted_result = {
        'name': test,
        'output': [output],
        'chartjson': json_output,
        'archive_bytes': archive_bytes,
        'exit_code': exit_code,
        'actual_exit_code': actual_exit_code,
        'result_type': result_type,
        'start_time': start_time,
        'end_time': end_time,
        'total_time': end_time - start_time,
        'device': str(self._device),
        'cmd': cmd,
    }
    self._SaveResult(persisted_result)
    return result_type

  def _ArchiveOutputDir(self):
    """Archive all files in the output dir, and return as compressed bytes."""
    with io.BytesIO() as archive:
      with zipfile.ZipFile(archive, 'w', zipfile.ZIP_DEFLATED) as contents:
        num_files = 0
        for absdir, _, files in os.walk(self._output_dir):
          reldir = os.path.relpath(absdir, self._output_dir)
          for filename in files:
            src_path = os.path.join(absdir, filename)
            # We use normpath to turn './file.txt' into just 'file.txt'.
            dst_path = os.path.normpath(os.path.join(reldir, filename))
            contents.write(src_path, dst_path)
            num_files += 1
      if num_files:
        logging.info('%d files in the output dir were archived.', num_files)
      else:
        logging.warning('No files in the output dir. Archive is empty.')
      return archive.getvalue()

  @staticmethod
  def _SaveResult(result):
    pickled = os.path.join(constants.PERF_OUTPUT_DIR, result['name'])
    if os.path.exists(pickled):
      # Use open() in binary mode rather than the file() builtin; pickle
      # data is not text.
      with open(pickled, 'rb') as f:
        previous = pickle.loads(f.read())
      result['output'] = previous['output'] + result['output']
    with open(pickled, 'wb') as f:
      f.write(pickle.dumps(result))

  def _TestTearDown(self):
    try:
      logging.info('Unmapping device ports for %s.', self._device)
      forwarder.Forwarder.UnmapAllDevicePorts(self._device)
    except Exception:  # pylint: disable=broad-except
      logging.exception('Exception when resetting ports.')


class LocalDevicePerfTestRun(local_device_test_run.LocalDeviceTestRun):
  def __init__(self, env, test_instance):
    super(LocalDevicePerfTestRun, self).__init__(env, test_instance)
    self._test_instance = test_instance
jbudorick, 2016/07/01 14:20:10: nit: alpha
rnephew (Reviews Here), 2016/07/01 22:09:32: Done.
    self._env = env
    self._timeout = None if test_instance.no_timeout else 60 * 60
    self._devices = None
    self._test_buckets = []
    self._watcher = None

  def SetUp(self):
    self._devices = self._GetAllDevices(self._env.devices,
                                        self._test_instance.known_devices_file)
    self._watcher = watchdog_timer.WatchdogTimer(self._timeout)
jbudorick, 2016/07/01 14:20:10: again, I don't think this is used any more.
rnephew (Reviews Here), 2016/07/01 22:09:32: Done.

    if (not (self._test_instance.print_step
jbudorick, 2016/07/01 14:20:10: Do you still need this if check? Neither of these …
rnephew (Reviews Here), 2016/07/01 22:09:31: Done.
             or self._test_instance.output_json_list)):
      if os.path.exists(constants.PERF_OUTPUT_DIR):
        shutil.rmtree(constants.PERF_OUTPUT_DIR)
      os.makedirs(constants.PERF_OUTPUT_DIR)

  def TearDown(self):
    pass

  def _GetStepsFromDict(self):
    # From where this is called, one of these two must be set.
jbudorick, 2016/07/01 14:20:10: If this is the case, we should fail hard in the event …
rnephew (Reviews Here), 2016/07/01 22:09:32: Done.
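Following the fail-hard suggestion, the method could end with an explicit raise instead of implicitly returning None when neither option is set. A sketch, using a hypothetical exception class defined alongside TestDictVersionError:

    # At the end of _GetStepsFromDict, after both if blocks:
    raise PerfTestRunGetStepsError(  # hypothetical exception class
        'Neither single_step nor steps set in the test instance.')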
    if self._test_instance.single_step:
      return {
          'version': 1,
          'steps': {
              'single_step': {
                  'device_affinity': 0,
                  'cmd': self._test_instance.single_step
              },
          }
      }
    if self._test_instance.steps:
      with open(self._test_instance.steps, 'r') as f:
        steps = json.load(f)
      if steps['version'] != 1:
        raise TestDictVersionError(
            'Version is expected to be %d but was %d' % (1, steps['version']))
      return steps

  def _SplitTestsByAffinity(self):
    test_dict = self._GetStepsFromDict()
    for test, test_config in test_dict['steps'].iteritems():
      try:
        affinity = test_config['device_affinity']
        if len(self._test_buckets) < affinity + 1:
          while len(self._test_buckets) != affinity + 1:
            self._test_buckets.append({})
        self._test_buckets[affinity][test] = test_config
      except KeyError:
        logging.exception('Bad test config')
jbudorick, 2016/07/01 14:20:10: This should include more detail -- at least the bad …
rnephew (Reviews Here), 2016/07/01 22:09:31: Done.
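A more detailed message could name the offending step and its config, both of which are in scope in the loop. A minimal sketch:

      except KeyError:
        logging.exception('Bad test config for step %s: %s', test, test_config)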
    return self._test_buckets

  @staticmethod
  def _GetAllDevices(active_devices, devices_path):
    try:
      if devices_path:
        devices = [device_utils.DeviceUtils(s)
                   for s in device_list.GetPersistentDeviceList(devices_path)]
        if not devices and active_devices:
          logging.warning('%s is empty. Falling back to active devices.',
                          devices_path)
          devices = active_devices
      else:
        logging.warning('Known devices file path was not passed. For device '
                        'affinity to work properly, it must be passed.')
        devices = active_devices
    except IOError as e:
      logging.error('Unable to find %s [%s]', devices_path, e)
      devices = active_devices
    return sorted(devices)

  def RunTests(self):
    # Affinitize the tests.
    test_buckets = self._SplitTestsByAffinity()
    if not test_buckets:
      raise local_device_test_run.NoTestsError()

    blacklist = (device_blacklist.Blacklist(self._env.blacklist)
                 if self._env.blacklist
                 else None)

    def run_perf_tests(x):
      if device_status.IsBlacklisted(str(self._devices[x]), blacklist):
        logging.warning('Device %s is not active. Will not create shard %s.',
                        str(self._devices[x]), x)
        return []
      s = TestShard(self._env, self._test_instance, self._devices[x], x,
                    test_buckets[x], watcher=self._watcher,
                    retries=self._env.max_tries, timeout=self._timeout)
      return s.RunTestsOnShard()

    device_indices = range(min(len(self._devices), len(test_buckets)))
    shards = parallelizer.Parallelizer(device_indices).pMap(run_perf_tests)
    return list(itertools.chain.from_iterable(shards.pGet(self._timeout)))
jbudorick, 2016/07/01 14:20:10: 🔥
rnephew (Reviews Here), 2016/07/01 22:09:32: Acknowledged.

  # override
  def TestPackage(self):
    return 'perf'

  # override
  def _CreateShards(self, _tests):
    raise NotImplementedError

  # override
  def _GetTests(self):
    return self._test_buckets

  # override
  def _RunTest(self, _device, _test):
    raise NotImplementedError

  # override
  def _ShouldShard(self):
    return False


class LocalDevicePerfTestRunOutputJsonList(LocalDevicePerfTestRun):
  def SetUp(self):
    pass

  def RunTests(self):
    return self._test_instance.RunOutputJsonList()

  # override
  def _CreateShards(self, _tests):
jbudorick, 2016/07/01 14:20:10: Do these need to be here?
rnephew (Reviews Here), 2016/07/01 22:09:31: Yep. Linter errors.
    raise NotImplementedError

  # override
  def _RunTest(self, _device, _test):
    raise NotImplementedError


class LocalDevicePerfTestRunPrintStep(LocalDevicePerfTestRun):
  def SetUp(self):
    pass

  def RunTests(self):
    return self._test_instance.RunPrintStep()

  # override
  def _CreateShards(self, _tests):
    raise NotImplementedError

  # override
  def _RunTest(self, _device, _test):
    raise NotImplementedError


class TestDictVersionError(Exception):
  pass