Chromium Code Reviews

Side by Side Diff: build/android/pylib/local/device/local_device_perf_test_run.py

Issue 2012323002: [Android] Implement perf tests to platform mode. (Closed) Base URL: https://chromium.googlesource.com/chromium/src.git@master
Patch Set: do not run as default and rebase (created 4 years, 5 months ago)
# Copyright 2016 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.

import io
import json
import logging
import os
import pickle
import shutil
import tempfile
import time
import zipfile

from devil.android import battery_utils
from devil.android import device_errors
from devil.android import device_list
from devil.android import device_utils
from devil.android import forwarder
from devil.android.tools import device_recovery
from devil.utils import cmd_helper
from devil.utils import reraiser_thread
from devil.utils import watchdog_timer
from pylib import constants
from pylib.base import base_test_result
from pylib.constants import host_paths
from pylib.local.device import local_device_test_run


class TestShard(object):
  def __init__(self, env, test_instance, device, index, tests, results,
               watcher=None, retries=3, timeout=None):
    logging.info('Creating shard %s for device %s to run the following tests:',
                 index, device)
    for t in tests:
      logging.info(' %s', t)
    self._battery = battery_utils.BatteryUtils(device)
    self._device = device
    self._env = env
    self._index = index
    self._output_dir = None
    self._results = results
    self._retries = retries
    self._test_instance = test_instance
    self._tests = tests
    self._timeout = timeout
    self._watcher = watcher

  def _TestSetUp(self, test):
    if self._watcher:
      self._watcher.Reset()

    logging.info('Charge level: %s%%',
                 self._battery.GetBatteryInfo().get('level'))
    if self._test_instance.min_battery_level:
      self._battery.ChargeDeviceToLevel(self._test_instance.min_battery_level)

    logging.info('Temperature: %s (tenths of a degree C)',
                 self._battery.GetBatteryInfo().get('temperature'))
    if self._test_instance.max_battery_temp:
      self._battery.LetBatteryCoolToTemperature(
          self._test_instance.max_battery_temp)

    if not self._device.IsScreenOn():
      self._device.SetScreen(True)

    if not self._device.IsOnline():
      msg = 'Device %s is unresponsive.' % self._device
      raise device_errors.DeviceUnreachableError(msg)
    if self._output_dir:
      # Clear out the previous test's output and drop the stale path, so a
      # test that needs no output dir does not reuse a deleted one.
      shutil.rmtree(self._output_dir, ignore_errors=True)
      self._output_dir = None
    if (self._test_instance.collect_chartjson_data
        or self._tests[test].get('archive_output_dir')):
      self._output_dir = tempfile.mkdtemp()
    if self._watcher:
      self._watcher.Reset()

  def _TestTearDown(self):
    try:
      logging.info('Unmapping device ports for %s.', self._device)
      forwarder.Forwarder.UnmapAllDevicePorts(self._device)
jbudorick 2016/06/28 10:27:32 Why is this unmapping but not mapping? What's misbehaving?
rnephew (Reviews Here) 2016/06/29 22:27:19 run_benchmark takes care of the mapping. This just cleans up afterward.
jbudorick 2016/07/01 14:20:09 Does the instance of the Forwarder in this process know about the ports run_benchmark mapped?
jbudorick 2016/07/06 19:12:09 ^
    except Exception: # pylint: disable=broad-except
      logging.exception('Exception when resetting ports.')

  def _CreateCmd(self, test):
    cmd = '%s --device %s' % (self._tests[test]['cmd'], self._device)
    if self._output_dir:
      cmd = cmd + ' --output-dir=%s' % self._output_dir
    if self._test_instance.dry_run:
      cmd = 'echo %s' % cmd
    return cmd

  def _RunSingleTest(self, test):
    logging.info('Running %s on shard %d', test, self._index)
jbudorick 2016/06/28 10:27:33 str(self._index) or %d?
rnephew (Reviews Here) 2016/06/29 22:27:19 Done.
    timeout = (
        None if self._test_instance.no_timeout
        else self._tests[test].get('timeout', self._timeout))
    logging.info('Timeout for %s test: %s', test, timeout)
jbudorick 2016/06/28 10:27:32 str(timeout) or %d?
rnephew (Reviews Here) 2016/06/29 22:27:19 Done.

    cmd = self._CreateCmd(test)
    self._test_instance.WriteBuildBotJson(self._output_dir)
    cwd = os.path.abspath(host_paths.DIR_SOURCE_ROOT)

    try:
      logging.debug("Running test with command '%s'", cmd)
jbudorick 2016/06/28 10:27:32 nit: Use double quotes for the string when it contains single quotes.
rnephew (Reviews Here) 2016/06/29 22:27:19 Done.
      start_time = time.time()
      exit_code, output = cmd_helper.GetCmdStatusAndOutputWithTimeout(
          cmd, timeout, cwd=cwd, shell=True)
      end_time = time.time()
      json_output = self._test_instance.ReadChartjsonOutput(self._output_dir)
    except cmd_helper.TimeoutError as e:
      # Record the end time here as well; otherwise the timeout path would
      # reach the return below with end_time unbound.
      end_time = time.time()
      exit_code = -1
      output = e.output
      json_output = ''

    return self._ProcessTestResult(
        test, cmd, start_time, end_time, exit_code, output, json_output)

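For reference, the run-with-timeout pattern above in isolation, built only on the devil calls this file already uses; a minimal sketch (the helper name is made up):

from devil.utils import cmd_helper

def run_with_timeout(cmd, timeout_secs, cwd):
  # Returns (exit_code, output); an exit code of -1 marks a timeout,
  # mirroring _RunSingleTest above.
  try:
    return cmd_helper.GetCmdStatusAndOutputWithTimeout(
        cmd, timeout_secs, cwd=cwd, shell=True)
  except cmd_helper.TimeoutError as e:
    return -1, e.output
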
  def _ProcessTestResult(
      self, test, cmd, start_time, end_time, exit_code, output, json_output):
    if exit_code is None:
      exit_code = -1
    logging.info('%s : exit_code=%d in %d secs on device %s',
                 test, exit_code, end_time - start_time, self._device)
    if exit_code == 0:
      result_type = base_test_result.ResultType.PASS
    else:
      result_type = base_test_result.ResultType.FAIL
    # TODO(rnephew): Improve device recovery logic.
jbudorick 2016/06/28 10:27:32 Why is this in _ProcessTestResult? I think this should live somewhere else.
rnephew (Reviews Here) 2016/06/29 22:27:19 Done.
    try:
      device_recovery.RecoverDevice(self._device, self._env.blacklist)
    except device_errors.CommandTimeoutError:
      logging.exception('Device failed to return after %s.', test)
    actual_exit_code = exit_code
    if (self._test_instance.flaky_steps
        and test in self._test_instance.flaky_steps):
      exit_code = 0
    archive_bytes = (self._ArchiveOutputDir()
                     if self._tests[test].get('archive_output_dir')
                     else None)
    persisted_result = {
        'name': test,
        'output': [output],
        'chartjson': json_output,
        'archive_bytes': archive_bytes,
        'exit_code': exit_code,
        'actual_exit_code': actual_exit_code,
        'result_type': result_type,
        'start_time': start_time,
        'end_time': end_time,
        'total_time': end_time - start_time,
        'device': str(self._device),
        'cmd': cmd,
    }
    self._SaveResult(persisted_result)
    return result_type

  @local_device_test_run.handle_shard_failures
  def RunTestsOnShard(self):
jbudorick 2016/06/28 10:27:32 The functions in this class are ordered strangely.
rnephew (Reviews Here) 2016/06/29 22:27:19 Done.
    for test in self._tests:
      try:
        tries_left = self._retries
        result_type = None
        while (result_type != base_test_result.ResultType.PASS
jbudorick 2016/06/28 10:27:32 This retry mechanism is the inverse of how gtests handle retries.
rnephew (Reviews Here) 2016/06/29 22:27:19 So that we do not have to store which tests are passing.
jbudorick 2016/07/01 14:20:09 sgtm
               and tries_left > 0):
          try:
            self._TestSetUp(test)
            result_type = self._RunSingleTest(test)
          except Exception: # pylint: disable=broad-except
            logging.exception('Exception when executing %s.', test)
            result_type = base_test_result.ResultType.FAIL
          finally:
            tries_left -= 1
        self._TestTearDown()
        result = base_test_result.TestRunResults()
        result.AddResult(base_test_result.BaseTestResult(test, result_type))
        self._results.append(result)
      finally:
        if self._output_dir:
          shutil.rmtree(self._output_dir, ignore_errors=True)
          self._output_dir = None

  @staticmethod
  def _SaveResult(result):
    pickled = os.path.join(constants.PERF_OUTPUT_DIR, result['name'])
    if os.path.exists(pickled):
      # Merge in output from previous runs of the same step, so repeat
      # invocations accumulate rather than clobber.
      with open(pickled, 'rb') as f:
        previous = pickle.loads(f.read())
        result['output'] = previous['output'] + result['output']
    with open(pickled, 'wb') as f:
      f.write(pickle.dumps(result))

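The pickle written here can be read back for postprocessing; a minimal sketch (load_result is a hypothetical helper, not part of this CL):

import os
import pickle

from pylib import constants

def load_result(step_name):
  # Loads the result dict persisted by TestShard._SaveResult; the
  # 'output' key holds one entry per run of the step.
  path = os.path.join(constants.PERF_OUTPUT_DIR, step_name)
  with open(path, 'rb') as f:
    return pickle.loads(f.read())
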
  def _ArchiveOutputDir(self):
    """Archive all files in the output dir, and return as compressed bytes."""
    with io.BytesIO() as archive:
      with zipfile.ZipFile(archive, 'w', zipfile.ZIP_DEFLATED) as contents:
        num_files = 0
        for absdir, _, files in os.walk(self._output_dir):
          reldir = os.path.relpath(absdir, self._output_dir)
          for filename in files:
            src_path = os.path.join(absdir, filename)
            # We use normpath to turn './file.txt' into just 'file.txt'.
            dst_path = os.path.normpath(os.path.join(reldir, filename))
            contents.write(src_path, dst_path)
            num_files += 1
      if num_files:
        logging.info('%d files in the output dir were archived.', num_files)
      else:
        logging.warning('No files in the output dir. Archive is empty.')
      return archive.getvalue()

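On the consuming side, the archive bytes round-trip through zipfile; a minimal sketch (extract_archive is illustrative, not part of this CL):

import io
import zipfile

def extract_archive(archive_bytes, dest_dir):
  # Inverse of _ArchiveOutputDir: inflate the in-memory zip onto disk.
  with zipfile.ZipFile(io.BytesIO(archive_bytes)) as z:
    z.extractall(dest_dir)
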

class LocalDevicePerfTestRun(local_device_test_run.LocalDeviceTestRun):
  def __init__(self, env, test_instance):
    super(LocalDevicePerfTestRun, self).__init__(env, test_instance)
    self._test_instance = test_instance
    self._env = env
    self._timeout = None if test_instance.no_timeout else 60 * 60
    self._devices = None
    self._test_buckets = []
    self._watcher = None

  def SetUp(self):
    self._devices = self._GetAllDevices(self._env.devices,
                                        self._test_instance.known_devices_file)
    self._watcher = watchdog_timer.WatchdogTimer(self._timeout)

    if not (self._test_instance.print_step
            or self._test_instance.output_json_list):
      if os.path.exists(constants.PERF_OUTPUT_DIR):
        shutil.rmtree(constants.PERF_OUTPUT_DIR)
      os.makedirs(constants.PERF_OUTPUT_DIR)

  def TearDown(self):
    pass

  def _GetStepsFromDict(self):
    # By the time this is called, exactly one of single_step or steps has
    # been set on the test instance.
    if self._test_instance.single_step:
      return {
        'version': 1,
        'steps': {
          'single_step': {
            'device_affinity': 0,
            'cmd': self._test_instance.single_step
          },
        }
      }
    if self._test_instance.steps:
      with open(self._test_instance.steps, 'r') as f:
        steps = json.load(f)
      if steps['version'] != 1:
        raise VersionError(
            'Version is expected to be %d but was %d' % (1, steps['version']))
      return steps

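For context, a version-1 steps file consumed above might look like the following; the step names and commands here are made up for illustration:

{
  "version": 1,
  "steps": {
    "benchmark_one": {
      "device_affinity": 0,
      "cmd": "tools/perf/run_benchmark benchmark_one"
    },
    "benchmark_two": {
      "device_affinity": 1,
      "cmd": "tools/perf/run_benchmark benchmark_two"
    }
  }
}
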
  def _SplitTestsByAffinity(self):
    test_dict = self._GetStepsFromDict()
    for test, test_config in test_dict['steps'].iteritems():
      try:
        affinity = test_config['device_affinity']
        # Grow the bucket list with empty buckets up to the affinity index,
        # so bucket i always corresponds to device i.
        while len(self._test_buckets) < affinity + 1:
          self._test_buckets.append({})
        self._test_buckets[affinity][test] = test_config
      except KeyError:
        logging.exception('Bad test config')
    return self._test_buckets

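Worked through on a made-up input, the bucketing leaves holes for unused affinities, which is what keeps bucket index i pinned to device i:

# Illustrative only: steps pinned to devices 0 and 2, nothing at 1.
steps = {
    'step_a': {'device_affinity': 0, 'cmd': 'echo a'},
    'step_c': {'device_affinity': 2, 'cmd': 'echo c'},
}
buckets = []
for test, config in steps.iteritems():
  affinity = config['device_affinity']
  while len(buckets) < affinity + 1:
    buckets.append({})
  buckets[affinity][test] = config
# buckets is now [{'step_a': ...}, {}, {'step_c': ...}]
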
  @staticmethod
  def _GetAllDevices(active_devices, devices_path):
    try:
      if devices_path:
        devices = [device_utils.DeviceUtils(s)
jbudorick 2016/06/28 10:27:32 port over https://codereview.chromium.org/20700430
rnephew (Reviews Here) 2016/06/29 22:27:19 Done.
                   for s in device_list.GetPersistentDeviceList(devices_path)]
      else:
        logging.warning('No known devices file was given. Device affinity '
                        'will not work properly without one.')
        devices = active_devices
    except IOError as e:
      logging.error('Unable to find %s [%s]', devices_path, e)
      devices = active_devices
    return sorted(devices)


  def RunTests(self):
    # If requested, just write out a json file listing the test names.
    if self._test_instance.output_json_list:
jbudorick 2016/06/28 10:27:32 I'm wondering if these should be subcommands of 'perf'.
rnephew (Reviews Here) 2016/06/29 22:27:19 It could be switched to that if you want; but probably not worth it.
rnephew (Reviews Here) 2016/06/30 19:10:36 Moved them to their own classes in the same file.
      return self._test_instance.RunOutputJsonList()

    # Just print the results from a single previously executed step.
    if self._test_instance.print_step:
      return self._test_instance.RunPrintStep()

    # Affinitize the tests.
    test_buckets = self._SplitTestsByAffinity()
    if not test_buckets:
      raise local_device_test_run.NoTestsError()
    threads = []
    results = []
    for x in xrange(min(len(self._devices), len(test_buckets))):
jbudorick 2016/06/28 10:27:32 I think you could do this as: def run_perf_test
rnephew (Reviews Here) 2016/06/29 22:27:19 Done.
      new_shard = TestShard(self._env, self._test_instance, self._devices[x], x,
jbudorick 2016/06/28 10:27:32 I don't think we should make a shard for a device that has no tests to run.
rnephew (Reviews Here) 2016/06/29 22:27:19 Done.
                            test_buckets[x], results, watcher=self._watcher,
                            retries=self._env.max_tries, timeout=self._timeout)
      threads.append(reraiser_thread.ReraiserThread(new_shard.RunTestsOnShard))

    workers = reraiser_thread.ReraiserThreadGroup(threads)
    workers.StartAll()

    workers.JoinAll(self._watcher)
    return results

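jbudorick's point above (no shard for a device without tests) can be expressed by pairing devices with buckets directly; one plausible shape as a sketch, not necessarily what landed:

  def _MakeShards(self, test_buckets, results):
    # zip() stops at the shorter list, so extra devices and extra buckets
    # are both skipped; empty buckets get no idle shard either.
    threads = []
    for index, (device, bucket) in enumerate(zip(self._devices, test_buckets)):
      if not bucket:
        continue
      shard = TestShard(self._env, self._test_instance, device, index,
                        bucket, results, watcher=self._watcher,
                        retries=self._env.max_tries, timeout=self._timeout)
      threads.append(reraiser_thread.ReraiserThread(shard.RunTestsOnShard))
    return threads
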
  # override
  def TestPackage(self):
    return 'perf'

  # override
  def _CreateShards(self, _tests):
    raise NotImplementedError

  # override
  def _GetTests(self):
    return self._test_buckets

  # override
  def _RunTest(self, _device, _test):
    raise NotImplementedError

  # override
  def _ShouldShard(self):
    return False

jbudorick 2016/06/28 10:27:32 nit: +1 line
rnephew (Reviews Here) 2016/06/29 22:27:19 Done.

class VersionError(Exception):
jbudorick 2016/06/28 10:27:32 This name should be more specific.
rnephew (Reviews Here) 2016/06/29 22:27:19 Done.
  pass