# Copyright 2016 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.

import io
import itertools
import json
import logging
import os
import pickle
import shutil
import tempfile
import time
import zipfile

from devil.android import battery_utils
from devil.android import device_blacklist
from devil.android import device_errors
from devil.android import device_list
from devil.android import device_utils
from devil.android import forwarder
from devil.android.tools import device_recovery
from devil.android.tools import device_status
from devil.utils import cmd_helper
from devil.utils import parallelizer
from pylib import constants
from pylib.base import base_test_result
from pylib.constants import host_paths
from pylib.local.device import local_device_test_run


class TestShard(object):
  def __init__(
      self, env, test_instance, device, index, tests, retries=3, timeout=None):
    logging.info('Creating shard %s for device %s to run the following tests:',
                 index, device)
    for t in tests:
      logging.info('  %s', t)
    self._battery = battery_utils.BatteryUtils(device)
    self._device = device
    self._env = env
    self._index = index
    self._output_dir = None
    self._results = []
    self._retries = retries
    self._test_instance = test_instance
    self._tests = tests
    self._timeout = timeout

  @local_device_test_run.handle_shard_failures
  def RunTestsOnShard(self):
    for test in self._tests:
      tries_left = self._retries
      result_type = None
      while (result_type != base_test_result.ResultType.PASS
             and tries_left > 0):
        try:
          self._TestSetUp(test)
          result_type = self._RunSingleTest(test)
        except device_errors.CommandFailedError:
          logging.exception('Exception when executing %s.', test)
          result_type = base_test_result.ResultType.FAIL
        finally:
          # Tear down after every attempt: set-up runs per attempt, so its
          # tear-down must pair with it here rather than in an outer block.
          self._TestTearDown()
          if result_type != base_test_result.ResultType.PASS:
            try:
              device_recovery.RecoverDevice(self._device, self._env.blacklist)
            except device_errors.CommandTimeoutError:
              logging.exception(
                  'Device failed to recover after failing %s.', test)
          tries_left = tries_left - 1
      result = base_test_result.TestRunResults()
      result.AddResult(base_test_result.BaseTestResult(test, result_type))
      self._results.append(result)
mikecase (-- gone --)
2016/07/01 23:29:44
I think* since self.TestSetUp is called in the inner…
rnephew (Reviews Here)
2016/07/06 15:32:39
Yeah, I put this in the wrong finally block.
    return self._results

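The thread above settles on pairing tear-down with set-up per attempt. A minimal sketch of that pattern, with set_up/run_once/tear_down as hypothetical placeholders for _TestSetUp, _RunSingleTest and _TestTearDown:

def run_with_retries(set_up, run_once, tear_down, retries=3):
  # Each attempt gets its own tear-down, so a failed attempt cannot leak
  # state (forwarded ports, temp dirs) into the next attempt.
  result = None
  for _ in range(retries):
    try:
      set_up()
      result = run_once()
    finally:
      tear_down()
    if result == 'PASS':
      break
  return result
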
  def _TestSetUp(self, test):
    if not self._device.IsOnline():
      msg = 'Device %s is unresponsive.' % str(self._device)
      raise device_errors.DeviceUnreachableError(msg)

    logging.info('Charge level: %s%%',
                 str(self._battery.GetBatteryInfo().get('level')))
    if self._test_instance.min_battery_level:
      self._battery.ChargeDeviceToLevel(self._test_instance.min_battery_level)

    logging.info('Temperature: %s (in units of 0.1 C)',
                 str(self._battery.GetBatteryInfo().get('temperature')))
    if self._test_instance.max_battery_temp:
      self._battery.LetBatteryCoolToTemperature(
          self._test_instance.max_battery_temp)

    if not self._device.IsScreenOn():
      self._device.SetScreen(True)

    if (self._test_instance.collect_chartjson_data
        or self._tests[test].get('archive_output_dir')):
      self._output_dir = tempfile.mkdtemp()

  def _RunSingleTest(self, test):
    logging.info('Running %s on shard %d', test, self._index)
    timeout = self._tests[test].get('timeout', self._timeout)
    # %s rather than %d: timeout is None when --no-timeout is set.
    logging.info('Timeout for %s test: %s', test, timeout)

    cmd = self._CreateCmd(test)
    self._test_instance.WriteBuildBotJson(self._output_dir)
    cwd = os.path.abspath(host_paths.DIR_SOURCE_ROOT)

    try:
      logging.debug("Running test with command '%s'", cmd)
      start_time = time.time()
      exit_code, output = cmd_helper.GetCmdStatusAndOutputWithTimeout(
          cmd, timeout, cwd=cwd, shell=True)
      end_time = time.time()
      json_output = self._test_instance.ReadChartjsonOutput(self._output_dir)
      timed_out = False
    except cmd_helper.TimeoutError as e:
      end_time = time.time()
      exit_code = -1
      output = e.output
      json_output = ''
      timed_out = True

    return self._ProcessTestResult(test, cmd, start_time, end_time, exit_code,
                                   output, json_output, timed_out)

  def _CreateCmd(self, test):
    cmd = '%s --device %s' % (self._tests[test]['cmd'], str(self._device))
    if self._output_dir:
      cmd = cmd + ' --output-dir=%s' % self._output_dir
    if self._test_instance.dry_run:
      cmd = 'echo %s' % cmd
    return cmd

  def _ProcessTestResult(self, test, cmd, start_time, end_time, exit_code,
                         output, json_output, timed_out):
    if exit_code is None:
      exit_code = -1
    logging.info('%s : exit_code=%d in %d secs on device %s',
                 test, exit_code, end_time - start_time,
                 str(self._device))
    if timed_out:
      result_type = base_test_result.ResultType.TIMEOUT
    elif exit_code == 0:
      result_type = base_test_result.ResultType.PASS
    else:
      result_type = base_test_result.ResultType.FAIL
    actual_exit_code = exit_code
mikecase (-- gone --)
2016/07/01 23:29:44
Should the fact that you are setting exit_code to…
rnephew (Reviews Here)
2016/07/06 15:32:39
In porting the old code I saw this and figured None…
    if (self._test_instance.flaky_steps
        and test in self._test_instance.flaky_steps):
      # Mask failures of known-flaky steps; the real status is still kept
      # in 'actual_exit_code' below.
      exit_code = 0
    archive_bytes = (self._ArchiveOutputDir()
                     if self._tests[test].get('archive_output_dir')
                     else None)
    persisted_result = {
        'name': test,
        'output': [output],
        'chartjson': json_output,
        'archive_bytes': archive_bytes,
        'exit_code': exit_code,
        'actual_exit_code': actual_exit_code,
        'result_type': result_type,
        'start_time': start_time,
        'end_time': end_time,
        'total_time': end_time - start_time,
        'device': str(self._device),
        'cmd': cmd,
    }
    self._SaveResult(persisted_result)
    return result_type

  def _ArchiveOutputDir(self):
    """Archive all files in the output dir, and return as compressed bytes."""
    with io.BytesIO() as archive:
      with zipfile.ZipFile(archive, 'w', zipfile.ZIP_DEFLATED) as contents:
        num_files = 0
        for absdir, _, files in os.walk(self._output_dir):
          reldir = os.path.relpath(absdir, self._output_dir)
          for filename in files:
            src_path = os.path.join(absdir, filename)
            # We use normpath to turn './file.txt' into just 'file.txt'.
            dst_path = os.path.normpath(os.path.join(reldir, filename))
            contents.write(src_path, dst_path)
            num_files += 1
      if num_files:
        logging.info('%d files in the output dir were archived.', num_files)
      else:
        logging.warning('No files in the output dir. Archive is empty.')
      return archive.getvalue()

  @staticmethod
  def _SaveResult(result):
    pickled = os.path.join(constants.PERF_OUTPUT_DIR, result['name'])
    if os.path.exists(pickled):
      # Append to any output already recorded under this test name.
      with open(pickled, 'rb') as f:
        previous = pickle.load(f)
      result['output'] = previous['output'] + result['output']
    with open(pickled, 'wb') as f:
      pickle.dump(result, f)

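For reference, a minimal sketch of reading one of these persisted results back; load_result is a hypothetical helper, not part of this CL:

import os
import pickle

def load_result(name, output_dir):
  # Inverse of _SaveResult: unpickle the result persisted under a test name.
  with open(os.path.join(output_dir, name), 'rb') as f:
    return pickle.load(f)

# For a flaky step that failed, result['exit_code'] is 0 (masked) while
# result['actual_exit_code'] keeps the real exit code.
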
  def _TestTearDown(self):
    if self._output_dir:
      shutil.rmtree(self._output_dir, ignore_errors=True)
      self._output_dir = None
    try:
      logging.info('Unmapping device ports for %s.', self._device)
      forwarder.Forwarder.UnmapAllDevicePorts(self._device)
    except Exception:  # pylint: disable=broad-except
      logging.exception('Exception when resetting ports.')


class LocalDevicePerfTestRun(local_device_test_run.LocalDeviceTestRun):
  def __init__(self, env, test_instance):
    super(LocalDevicePerfTestRun, self).__init__(env, test_instance)
    self._devices = None
    self._env = env
    self._test_buckets = []
    self._test_instance = test_instance
    self._timeout = None if test_instance.no_timeout else 60 * 60

  def SetUp(self):
    self._devices = self._GetAllDevices(self._env.devices,
                                        self._test_instance.known_devices_file)

    if os.path.exists(constants.PERF_OUTPUT_DIR):
      shutil.rmtree(constants.PERF_OUTPUT_DIR)
    os.makedirs(constants.PERF_OUTPUT_DIR)

  def TearDown(self):
    pass

  def _GetStepsFromDict(self):
    # One of single_step or steps must be set by the time this is called.
    if not (self._test_instance.single_step or self._test_instance.steps):
      raise PerfTestRunGetStepsError(
          'Neither single_step nor steps set in test_instance.')
    if self._test_instance.single_step:
      return {
          'version': 1,
          'steps': {
              'single_step': {
                  'device_affinity': 0,
                  'cmd': self._test_instance.single_step
              },
          }
      }
    if self._test_instance.steps:
      with open(self._test_instance.steps, 'r') as f:
        steps = json.load(f)
      if steps['version'] != 1:
        raise TestDictVersionError(
            'Version is expected to be %d but was %d' % (1, steps['version']))
      return steps

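For context, a hypothetical steps file in the version-1 format this method parses (the step name and command are made up):

{
  "version": 1,
  "steps": {
    "sunspider": {
      "device_affinity": 0,
      "cmd": "tools/perf/run_benchmark sunspider"
    }
  }
}
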
  def _SplitTestsByAffinity(self):
    test_dict = self._GetStepsFromDict()
    for test, test_config in test_dict['steps'].iteritems():
      try:
        affinity = test_config['device_affinity']
mikecase (-- gone --)
2016/07/01 23:29:44
You could make self._test_buckets a dict, like...
rnephew (Reviews Here)
2016/07/06 15:32:39
Yeah, I prefer it to just be lists of lists of tests.
        # Grow the bucket list until there is a bucket for this affinity.
        while len(self._test_buckets) <= affinity:
          self._test_buckets.append({})
        self._test_buckets[affinity][test] = test_config
      except KeyError:
        logging.exception(
            'Test config for %s is bad.\n Config: %s', test, str(test_config))
    return self._test_buckets

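A sketch of the dict-based alternative mikecase suggests above; split_by_affinity is a hypothetical standalone version assuming the same steps format:

import collections

def split_by_affinity(steps):
  # Buckets keyed by affinity: no padding with empty dicts when the
  # affinities in use are sparse (e.g. only 0 and 7).
  buckets = collections.defaultdict(dict)
  for test, config in steps['steps'].iteritems():
    buckets[config['device_affinity']][test] = config
  return buckets

The CL keeps a list of dicts instead, which matches indexing buckets by device position in RunTests below.
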
  @staticmethod
  def _GetAllDevices(active_devices, devices_path):
    try:
      if devices_path:
        devices = [device_utils.DeviceUtils(s)
                   for s in device_list.GetPersistentDeviceList(devices_path)]
        if not devices and active_devices:
          logging.warning('%s is empty. Falling back to active devices.',
                          devices_path)
          devices = active_devices
      else:
        logging.warning('No known devices file path was passed. For device '
                        'affinity to work properly, it must be passed.')
        devices = active_devices
    except IOError as e:
      logging.error('Unable to find %s [%s]', devices_path, e)
      devices = active_devices
    return sorted(devices)

  def RunTests(self):
    # Affinitize the tests: group them into buckets by 'device_affinity' so
    # that a given step always runs on the device at the same index, keeping
    # its perf numbers comparable from run to run.
mikecase (-- gone --)
2016/07/01 23:29:44
We should have some documentation on what "Affinitize" means…
rnephew (Reviews Here)
2016/07/06 15:32:39
Done.
    test_buckets = self._SplitTestsByAffinity()
    if not test_buckets:
      raise local_device_test_run.NoTestsError()

    blacklist = (device_blacklist.Blacklist(self._env.blacklist)
                 if self._env.blacklist
                 else None)

    def run_perf_tests(x):
      if device_status.IsBlacklisted(str(self._devices[x]), blacklist):
        logging.warning('Device %s is not active. Will not create shard %s.',
                        str(self._devices[x]), x)
        return []
      s = TestShard(self._env, self._test_instance, self._devices[x], x,
                    test_buckets[x], retries=self._env.max_tries,
                    timeout=self._timeout)
      return s.RunTestsOnShard()

    device_indices = range(min(len(self._devices), len(test_buckets)))
    shards = parallelizer.Parallelizer(device_indices).pMap(run_perf_tests)
    return list(itertools.chain.from_iterable(shards.pGet(self._timeout)))

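To illustrate the sharding math, a hypothetical run (device serials and step names made up):

# Two sorted devices but three buckets: only min(2, 3) = 2 shards are
# created, so the bucket at index 2 does not run in this invocation.
devices = ['serial_a', 'serial_b']
buckets = [{'octane': {}}, {'speedometer': {}}, {'dromaeo': {}}]
device_indices = range(min(len(devices), len(buckets)))
assert list(device_indices) == [0, 1]  # 'dromaeo' (affinity 2) is skipped
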
  # override
  def TestPackage(self):
    return 'perf'

  # override
  def _CreateShards(self, _tests):
    raise NotImplementedError

  # override
  def _GetTests(self):
    return self._test_buckets

  # override
  def _RunTest(self, _device, _test):
    raise NotImplementedError

  # override
  def _ShouldShard(self):
    return False


class LocalDevicePerfTestRunOutputJsonList(LocalDevicePerfTestRun):
mikecase (-- gone --)
2016/07/01 23:29:44
what a name :D
rnephew (Reviews Here)
2016/07/06 15:32:39
Acknowledged.
  def SetUp(self):
    pass

  def RunTests(self):
    return self._test_instance.RunOutputJsonList()

  # override
  def _CreateShards(self, _tests):
    raise NotImplementedError

  # override
  def _RunTest(self, _device, _test):
    raise NotImplementedError


class LocalDevicePerfTestRunPrintStep(LocalDevicePerfTestRun):
  def SetUp(self):
    pass

  def RunTests(self):
    return self._test_instance.RunPrintStep()

  # override
  def _CreateShards(self, _tests):
    raise NotImplementedError

  # override
  def _RunTest(self, _device, _test):
    raise NotImplementedError


class TestDictVersionError(Exception):
  pass


class PerfTestRunGetStepsError(Exception):
  pass