Chromium Code Reviews

Side by Side Diff: build/android/pylib/local/device/local_device_perf_test_run.py

Issue 2012323002: [Android] Implement perf tests in platform mode. (Closed) Base URL: https://chromium.googlesource.com/chromium/src.git@master
Patch Set: [Android] Implement perf tests in platform mode. Created 4 years, 5 months ago
1 # Copyright 2016 The Chromium Authors. All rights reserved.
2 # Use of this source code is governed by a BSD-style license that can be
3 # found in the LICENSE file.
4
5 import io
6 import itertools
7 import json
8 import logging
9 import os
10 import pickle
11 import shutil
12 import tempfile
13 import time
14 import zipfile
15
16 from devil.android import battery_utils
17 from devil.android import device_blacklist
18 from devil.android import device_errors
19 from devil.android import device_list
20 from devil.android import device_utils
21 from devil.android import forwarder
22 from devil.android.tools import device_recovery
23 from devil.android.tools import device_status
24 from devil.utils import cmd_helper
25 from devil.utils import parallelizer
26 from pylib import constants
27 from pylib.base import base_test_result
28 from pylib.constants import host_paths
29 from pylib.local.device import local_device_test_run
30
31
32 class TestShard(object):
33 def __init__(
34 self, env, test_instance, device, index, tests, retries=3, timeout=None):
35 logging.info('Create shard %s for device %s to run the following tests:',
36 index, device)
37 for t in tests:
38 logging.info(' %s', t)
39 self._battery = battery_utils.BatteryUtils(device)
40 self._device = device
41 self._env = env
42 self._index = index
43 self._output_dir = None
44 self._results = []
45 self._retries = retries
46 self._test_instance = test_instance
47 self._tests = tests
48 self._timeout = timeout
49
50 @local_device_test_run.handle_shard_failures
51 def RunTestsOnShard(self):
52 for test in self._tests:
53 tries_left = self._retries
54 result_type = None
55 while (result_type != base_test_result.ResultType.PASS
56 and tries_left > 0):
57 try:
58 self._TestSetUp(test)
59 result_type = self._RunSingleTest(test)
60 except device_errors.CommandFailedError:
jbudorick 2016/07/06 19:12:10 We should either: - have this catch device_errors ...
rnephew (Reviews Here) 2016/07/06 21:47:06 Done.
61 logging.exception('Exception when executing %s.', test)
62 result_type = base_test_result.ResultType.FAIL
63 finally:
64 self._TestTearDown()
65 if result_type != base_test_result.ResultType.PASS:
66 try:
67 device_recovery.RecoverDevice(self._device, self._env.blacklist)
68 except device_errors.CommandTimeoutError:
69 logging.exception(
70 'Device failed to recover after failing %s.', test)
71 tries_left -= 1
72 result = base_test_result.TestRunResults()
jbudorick 2016/07/06 19:12:10 nit: blank line before this one
rnephew (Reviews Here) 2016/07/06 21:47:06 Done.
73 result.AddResult(base_test_result.BaseTestResult(test, result_type))
74 self._results.append(result)
jbudorick 2016/07/06 19:12:10 A list of single-result TestRunResults objects seems ...
rnephew (Reviews Here) 2016/07/06 21:47:06 Done.
75 return self._results
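# Editor's sketch, not part of the patch: one shape that folds in the three
# review threads above (a broader exception catch, the blank-line nit, and a
# single TestRunResults container instead of a list of one-result objects).
# Assumptions: base_error would come from "from devil import base_error", and
# _RunTestWithRetries is a hypothetical helper wrapping the retry loop shown
# above; RunTests's aggregation would need to match the new return type.
@local_device_test_run.handle_shard_failures
def RunTestsOnShard(self):
  results = base_test_result.TestRunResults()
  for test in self._tests:
    try:
      # Hypothetical helper: the tries_left/recovery loop above, returning a
      # base_test_result.ResultType.
      result_type = self._RunTestWithRetries(test)
    except base_error.BaseError:  # assumes the broader catch was intended
      logging.exception('Exception when executing %s.', test)
      result_type = base_test_result.ResultType.FAIL

    results.AddResult(base_test_result.BaseTestResult(test, result_type))
  return results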
76
77 def _TestSetUp(self, test):
78 if not self._device.IsOnline():
79 msg = 'Device %s is unresponsive.' % str(self._device)
80 raise device_errors.DeviceUnreachableError(msg)
81
82 logging.info('Charge level: %s%%',
83 str(self._battery.GetBatteryInfo().get('level')))
84 if self._test_instance.min_battery_level:
85 self._battery.ChargeDeviceToLevel(self._test_instance.min_battery_level)
86
87 logging.info('Temperature: %s (in units of 0.1 C)',
88 str(self._battery.GetBatteryInfo().get('temperature')))
89 if self._test_instance.max_battery_temp:
90 self._battery.LetBatteryCoolToTemperature(
91 self._test_instance.max_battery_temp)
92
93 if not self._device.IsScreenOn():
94 self._device.SetScreen(True)
95
96 if (self._test_instance.collect_chartjson_data
97 or self._tests[test].get('archive_output_dir')):
98 self._output_dir = tempfile.mkdtemp()
99
100 def _RunSingleTest(self, test):
jbudorick 2016/07/06 19:12:10 The sequencing in this function is a bit odd. I think ...
rnephew (Reviews Here) 2016/07/06 21:47:06 Done.
101
102 logging.info('Running %s on shard %d', test, self._index)
103 timeout = self._tests[test].get('timeout', self._timeout)
jbudorick 2016/07/06 19:12:10 timeout is created here but not used (beyond logging ...
104 logging.info('Timeout for %s test: %s', test, timeout)
105
106 cmd = self._CreateCmd(test)
jbudorick 2016/07/06 19:12:10 cmd is created here but not used at all until the ...
107 self._test_instance.WriteBuildBotJson(self._output_dir)
108 cwd = os.path.abspath(host_paths.DIR_SOURCE_ROOT)
109
110 try:
111 logging.debug("Running test with command '%s'", cmd)
jbudorick 2016/07/06 19:12:10 Why is this in the try block?
rnephew (Reviews Here) 2016/07/06 21:47:06 Done.
112 start_time = time.time()
113 exit_code, output = cmd_helper.GetCmdStatusAndOutputWithTimeout(
114 cmd, timeout, cwd=cwd, shell=True)
115 end_time = time.time()
116 json_output = self._test_instance.ReadChartjsonOutput(self._output_dir)
117 timed_out = False
118 except cmd_helper.TimeoutError as e:
119 end_time = time.time()
120 exit_code = -1
121 output = e.output
122 json_output = ''
123 timed_out = True
jbudorick 2016/07/06 19:12:10 Can we just deal with the ResultType explicitly?
rnephew (Reviews Here) 2016/07/06 21:47:06 Done.
124
125 return self._ProcessTestResult(test, cmd, start_time, end_time, exit_code,
126 output, json_output, timed_out)
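# Editor's sketch, not part of the patch: a reordering that answers the
# sequencing comments above. Each value is built just before it is used, the
# debug log sits outside the try block, and the TIMEOUT ResultType is chosen
# explicitly in the except clause instead of threading a timed_out flag
# through. Assumes _ProcessTestResult would then take result_type (None
# unless a timeout occurred) and derive PASS/FAIL from exit_code as above.
def _RunSingleTest(self, test):
  self._test_instance.WriteBuildBotJson(self._output_dir)

  timeout = self._tests[test].get('timeout', self._timeout)
  cmd = self._CreateCmd(test)
  cwd = os.path.abspath(host_paths.DIR_SOURCE_ROOT)
  logging.info('Running %s on shard %d (timeout: %s)', test, self._index,
               timeout)
  logging.debug("Running test with command '%s'", cmd)

  result_type = None
  start_time = time.time()
  try:
    exit_code, output = cmd_helper.GetCmdStatusAndOutputWithTimeout(
        cmd, timeout, cwd=cwd, shell=True)
    json_output = self._test_instance.ReadChartjsonOutput(self._output_dir)
  except cmd_helper.TimeoutError as e:
    exit_code, output, json_output = -1, e.output, ''
    result_type = base_test_result.ResultType.TIMEOUT
  end_time = time.time()

  return self._ProcessTestResult(test, cmd, start_time, end_time, exit_code,
                                 output, json_output, result_type)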
127
128 def _CreateCmd(self, test):
129 cmd = '%s --device %s' % (self._tests[test]['cmd'], str(self._device))
130 if self._output_dir:
131 cmd = cmd + ' --output-dir=%s' % self._output_dir
132 if self._test_instance.dry_run:
133 cmd = 'echo %s' % cmd
134 return cmd
135
136 def _ProcessTestResult(self, test, cmd, start_time, end_time, exit_code,
137 output, json_output, timed_out):
138 if exit_code is None:
139 exit_code = -1
140 logging.info('%s : exit_code=%d in %d secs on device %s',
141 test, exit_code, end_time - start_time,
142 str(self._device))
143 if timed_out:
144 result_type = base_test_result.ResultType.TIMEOUT
145 elif exit_code == 0:
146 result_type = base_test_result.ResultType.PASS
147 else:
148 result_type = base_test_result.ResultType.FAIL
149 actual_exit_code = exit_code
150 if (self._test_instance.flaky_steps
151 and test in self._test_instance.flaky_steps):
152 exit_code = 0
153 archive_bytes = (self._ArchiveOutputDir()
154 if self._tests[test].get('archive_output_dir')
155 else None)
156 persisted_result = {
157 'name': test,
158 'output': [output],
159 'chartjson': json_output,
160 'archive_bytes': archive_bytes,
161 'exit_code': exit_code,
162 'actual_exit_code': actual_exit_code,
163 'result_type': result_type,
164 'start_time': start_time,
165 'end_time': end_time,
166 'total_time': end_time - start_time,
167 'device': str(self._device),
168 'cmd': cmd,
169 }
170 self._SaveResult(persisted_result)
171 return result_type
172
173 def _ArchiveOutputDir(self):
174 """Archive all files in the output dir, and return as compressed bytes."""
175 with io.BytesIO() as archive:
176 with zipfile.ZipFile(archive, 'w', zipfile.ZIP_DEFLATED) as contents:
177 num_files = 0
178 for absdir, _, files in os.walk(self._output_dir):
179 reldir = os.path.relpath(absdir, self._output_dir)
180 for filename in files:
181 src_path = os.path.join(absdir, filename)
182 # We use normpath to turn './file.txt' into just 'file.txt'.
183 dst_path = os.path.normpath(os.path.join(reldir, filename))
184 contents.write(src_path, dst_path)
185 num_files += 1
186 if num_files:
187 logging.info('%d files in the output dir were archived.', num_files)
188 else:
189 logging.warning('No files in the output dir. Archive is empty.')
190 return archive.getvalue()
191
192 @staticmethod
193 def _SaveResult(result):
194 pickled = os.path.join(constants.PERF_OUTPUT_DIR, result['name'])
195 if os.path.exists(pickled):
196 with open(pickled, 'r') as f:
197 previous = pickle.loads(f.read())
198 result['output'] = previous['output'] + result['output']
199 with open(pickled, 'w') as f:
200 f.write(pickle.dumps(result))
201
202 def _TestTearDown(self):
203 if self._output_dir:
204 shutil.rmtree(self._output_dir, ignore_errors=True)
205 self._output_dir = None
206 try:
207 logging.info('Unmapping device ports for %s.', self._device)
208 forwarder.Forwarder.UnmapAllDevicePorts(self._device)
209 except Exception: # pylint: disable=broad-except
210 logging.exception('Exception when resetting ports.')
211
212
213 class LocalDevicePerfTestRun(local_device_test_run.LocalDeviceTestRun):
214 def __init__(self, env, test_instance):
215 super(LocalDevicePerfTestRun, self).__init__(env, test_instance)
216 self._devices = None
217 self._env = env
218 self._test_buckets = []
219 self._test_instance = test_instance
220 self._timeout = None if test_instance.no_timeout else 60 * 60
221
222 def SetUp(self):
223 self._devices = self._GetAllDevices(self._env.devices,
224 self._test_instance.known_devices_file)
225
226 if os.path.exists(constants.PERF_OUTPUT_DIR):
227 shutil.rmtree(constants.PERF_OUTPUT_DIR)
228 os.makedirs(constants.PERF_OUTPUT_DIR)
229
230 def TearDown(self):
231 pass
232
233 def _GetStepsFromDict(self):
234 # From where this is called, one of these two must be set.
235 if not (self._test_instance.single_step or self._test_instance.steps):
236 raise PerfTestRunGetStepsError(
jbudorick 2016/07/06 19:12:10 Just raise this at the end of the function w/o the
rnephew (Reviews Here) 2016/07/06 21:47:06 Done.
237 'Neither single_step nor steps set in test_instance.')
238 if self._test_instance.single_step:
239 return {
240 'version': 1,
241 'steps': {
242 'single_step': {
243 'device_affinity': 0,
244 'cmd': self._test_instance.single_step
245 },
246 }
247 }
248 if self._test_instance.steps:
249 with open(self._test_instance.steps, 'r') as f:
250 steps = json.load(f)
251 if steps['version'] != 1:
252 raise TestDictVersionError(
253 'Version is expected to be %d but was %d' % (1, steps['version']))
254 return steps
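# Editor's sketch, not part of the patch: the restructuring suggested above,
# raising at the end of the function so no up-front conditional is needed.
# _SingleStepDict and _LoadStepsFromFile are hypothetical helpers standing in
# for the two return paths shown above.
def _GetStepsFromDict(self):
  if self._test_instance.single_step:
    return self._SingleStepDict()  # hypothetical: the literal dict above
  if self._test_instance.steps:
    return self._LoadStepsFromFile()  # hypothetical: the json.load path above
  raise PerfTestRunGetStepsError(
      'Neither single_step nor steps set in test_instance.')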
255
256 def _SplitTestsByAffinity(self):
257 # This splits tests by their device affinity so that the same tests always
258 # run on the same devices. This is important for perf tests since different
259 # devices might yield slightly different performance results.
260 test_dict = self._GetStepsFromDict()
261 for test, test_config in test_dict['steps'].iteritems():
262 try:
263 affinity = test_config['device_affinity']
264 if len(self._test_buckets) < affinity + 1:
265 while len(self._test_buckets) != affinity + 1:
266 self._test_buckets.append({})
267 self._test_buckets[affinity][test] = test_config
268 except KeyError:
269 logging.exception(
270 'Test config for %s is bad.\n Config:%s', test, str(test_config))
271 return self._test_buckets
jbudorick 2016/07/06 19:12:10 Why is this creating an instance variable and retu
rnephew (Reviews Here) 2016/07/06 21:47:06 Done.
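# Editor's sketch, not part of the patch: per the thread above, build and
# return a local list instead of mutating self._test_buckets (callers such
# as _GetTests below would need the same change). Hypothetical shape only.
def _SplitTestsByAffinity(self):
  test_buckets = []
  for test, test_config in self._GetStepsFromDict()['steps'].iteritems():
    try:
      affinity = test_config['device_affinity']
      while len(test_buckets) <= affinity:
        test_buckets.append({})
      test_buckets[affinity][test] = test_config
    except KeyError:
      logging.exception(
          'Test config for %s is bad.\n Config:%s', test, str(test_config))
  return test_buckets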
272
273 @staticmethod
274 def _GetAllDevices(active_devices, devices_path):
275 try:
276 if devices_path:
277 devices = [device_utils.DeviceUtils(s)
278 for s in device_list.GetPersistentDeviceList(devices_path)]
279 if not devices and active_devices:
280 logging.warning('%s is empty. Falling back to active devices.',
281 devices_path)
282 devices = active_devices
283 else:
284 logging.warning('Known devices file path was not passed. For device '
285 'affinity to work properly, it must be passed.')
286 devices = active_devices
287 except IOError as e:
288 logging.error('Unable to find %s [%s]', devices_path, e)
289 devices = active_devices
290 return sorted(devices)
291
292 def RunTests(self):
293 # Affinitize the tests.
294 test_buckets = self._SplitTestsByAffinity()
295 if not test_buckets:
296 raise local_device_test_run.NoTestsError()
297
298 blacklist = (device_blacklist.Blacklist(self._env.blacklist)
299 if self._env.blacklist
300 else None)
301
302 def run_perf_tests(x):
303 if device_status.IsBlacklisted(str(self._devices[x]), blacklist):
304 logging.warning('Device %s is not active. Will not create shard %s.',
305 str(self._devices[x]), x)
306 return []
307 s = TestShard(self._env, self._test_instance, self._devices[x], x,
308 test_buckets[x], retries=self._env.max_tries,
309 timeout=self._timeout)
310 return s.RunTestsOnShard()
311
312 device_indices = range(min(len(self._devices), len(test_buckets)))
313 shards = parallelizer.Parallelizer(device_indices).pMap(run_perf_tests)
314 return list(itertools.chain.from_iterable(shards.pGet(self._timeout)))
315
316 # override
317 def TestPackage(self):
318 return 'perf'
319
320 # override
321 def _CreateShards(self, _tests):
322 raise NotImplementedError
323
324 # override
325 def _GetTests(self):
326 return self._test_buckets
327
328 # override
329 def _RunTest(self, _device, _test):
330 raise NotImplementedError
331
332 # override
333 def _ShouldShard(self):
334 return False
335
336
337 class LocalDevicePerfTestRunOutputJsonList(LocalDevicePerfTestRun):
338 def SetUp(self):
339 pass
340
341 def RunTests(self):
342 return self._test_instance.RunOutputJsonList()
343
344 # override
345 def _CreateShards(self, _tests):
346 raise NotImplementedError
347
348 # override
349 def _RunTest(self, _device, _test):
350 raise NotImplementedError
351
352
353 class LocalDevicePerfTestRunPrintStep(LocalDevicePerfTestRun):
354 def SetUp(self):
355 pass
356
357 def RunTests(self):
358 return self._test_instance.RunPrintStep()
359
360 # override
361 def _CreateShards(self, _tests):
362 raise NotImplementedError
363
364 # override
365 def _RunTest(self, _device, _test):
366 raise NotImplementedError
367
368
369 class TestDictVersionError(Exception):
370 pass
371
372 class PerfTestRunGetStepsError(Exception):
373 pass