Chromium Code Reviews

Unified Diff: build/android/pylib/perf/test_runner.py

Issue 761903003: Update from https://crrev.com/306655 (Closed)
Base URL: git@github.com:domokit/mojo.git@master
Patch Set: Created 6 years ago
 # Copyright 2013 The Chromium Authors. All rights reserved.
 # Use of this source code is governed by a BSD-style license that can be
 # found in the LICENSE file.
 
 """Runs perf tests.
 
 Our buildbot infrastructure requires each slave to run steps serially.
 This is sub-optimal for android, where these steps can run independently on
 multiple connected devices.
 
(...skipping 34 matching lines...)
 option:
   --device: the serial number to be passed to all adb commands.
 """
 
 import collections
 import datetime
 import json
 import logging
 import os
 import pickle
+import shutil
 import sys
+import tempfile
 import threading
 import time
 
 from pylib import cmd_helper
 from pylib import constants
 from pylib import forwarder
 from pylib.base import base_test_result
 from pylib.base import base_test_runner
 from pylib.device import device_errors
 
 
 def OutputJsonList(json_input, json_output):
   with file(json_input, 'r') as i:
     all_steps = json.load(i)
   step_names = all_steps['steps'].keys()
   with file(json_output, 'w') as o:
     o.write(json.dumps(step_names))
   return 0
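
A note on OutputJsonList: it reads the --steps JSON and writes out just the step
names. The full schema is not visible in this hunk; in the sketch below, the
only certain keys are the ones this file actually touches
(all_steps['steps'].keys() here, plus ['cmd'] and ['device_affinity'] further
down). The step names and commands are made up.

  import json

  steps = {
      'steps': {
          'page_cycler.foo': {'cmd': 'tools/perf/run_foo', 'device_affinity': 0},
          'page_cycler.bar': {'cmd': 'tools/perf/run_bar', 'device_affinity': 1},
      },
  }
  with open('steps.json', 'w') as f:
    json.dump(steps, f)

  # OutputJsonList('steps.json', 'out.json') then writes
  # ["page_cycler.foo", "page_cycler.bar"] (dict key order is arbitrary).
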
 
 
+def OutputChartjson(test_name, json_file_name):
+  file_name = os.path.join(constants.PERF_OUTPUT_DIR, test_name)
+  with file(file_name, 'r') as f:
+    persisted_result = pickle.load(f)
+  with open(json_file_name, 'w') as o:
+    o.write(persisted_result['chartjson'])
+
+
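
The new OutputChartjson is the read half of the 'chartjson' field that
_LaunchPerfTest persists below. (Small nit: it mixes the Python-2-only file()
builtin with open() in the same function; open() is the portable spelling.) A
minimal sketch of the round trip, with a hypothetical test name and a stand-in
for constants.PERF_OUTPUT_DIR:

  import os
  import pickle

  PERF_OUTPUT_DIR = '/tmp/perf_output'  # stand-in for constants.PERF_OUTPUT_DIR
  if not os.path.isdir(PERF_OUTPUT_DIR):
    os.makedirs(PERF_OUTPUT_DIR)

  # Write side, what _SaveResult ends up persisting:
  result = {'name': 'page_cycler.foo', 'chartjson': '{"charts": {}}'}
  with open(os.path.join(PERF_OUTPUT_DIR, result['name']), 'w') as f:
    pickle.dump(result, f)

  # Read side, what OutputChartjson does:
  with open(os.path.join(PERF_OUTPUT_DIR, 'page_cycler.foo'), 'r') as f:
    print pickle.load(f)['chartjson']  # prints {"charts": {}}
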
 def PrintTestOutput(test_name):
   """Helper method to print the output of previously executed test_name.
 
   Args:
     test_name: name of the test that has been previously executed.
 
   Returns:
     exit code generated by the test step.
   """
   file_name = os.path.join(constants.PERF_OUTPUT_DIR, test_name)
(...skipping 75 matching lines...)
       max_shards: the maximum shard index.
       tests: a dict mapping test_name to command.
       flaky_tests: a list of flaky test_name.
     """
     super(TestRunner, self).__init__(device, None, 'Release')
     self._options = test_options
     self._shard_index = shard_index
     self._max_shard = max_shard
     self._tests = tests
     self._flaky_tests = flaky_tests
+    self._output_dir = None
 
   @staticmethod
   def _IsBetter(result):
     if result['actual_exit_code'] == 0:
       return True
     pickled = os.path.join(constants.PERF_OUTPUT_DIR,
                            result['name'])
     if not os.path.exists(pickled):
       return True
     with file(pickled, 'r') as f:
(...skipping 10 matching lines...)
   def _CheckDeviceAffinity(self, test_name):
     """Returns True if test_name has affinity for this shard."""
     affinity = (self._tests['steps'][test_name]['device_affinity'] %
                 self._max_shard)
     if self._shard_index == affinity:
       return True
     logging.info('Skipping %s on %s (affinity is %s, device is %s)',
                  test_name, self.device_serial, affinity, self._shard_index)
     return False
 
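
Device affinity is a plain modulo mapping: a step runs on the shard whose index
equals device_affinity % max_shard, so affinities larger than the attached
device pool still distribute deterministically. A quick worked example:

  # With 4 attached devices (max_shard=4):
  max_shard = 4
  for device_affinity in (0, 1, 2, 3, 5, 10):
    print device_affinity, '->', device_affinity % max_shard
  # 0 -> 0, 1 -> 1, 2 -> 2, 3 -> 3, 5 -> 1, 10 -> 2
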
+  def _CleanupOutputDirectory(self):
+    if self._output_dir:
+      shutil.rmtree(self._output_dir, ignore_errors=True)
+      self._output_dir = None
+
+  def _ReadChartjsonOutput(self):
+    if not self._output_dir:
+      return ''
+
+    json_output_path = os.path.join(self._output_dir, 'results-chart.json')
+    with open(json_output_path) as f:
+      return f.read()
+
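
One thing to watch in _ReadChartjsonOutput: open() raises IOError when the
benchmark exits without writing results-chart.json, and the call site below
sits in a try block that only catches cmd_helper.TimeoutError, so that IOError
would propagate out of _LaunchPerfTest. A more defensive variant (a sketch, not
part of this CL) would be:

  def _ReadChartjsonOutput(self):
    if not self._output_dir:
      return ''
    json_output_path = os.path.join(self._output_dir, 'results-chart.json')
    if not os.path.exists(json_output_path):
      logging.warning('No results-chart.json in %s', self._output_dir)
      return ''
    with open(json_output_path) as f:
      return f.read()
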
   def _LaunchPerfTest(self, test_name):
     """Runs a perf test.
 
     Args:
       test_name: the name of the test to be executed.
 
     Returns:
       A tuple containing (Output, base_test_result.ResultType)
     """
     if not self._CheckDeviceAffinity(test_name):
       return '', base_test_result.ResultType.PASS
 
     try:
       logging.warning('Unmapping device ports')
       forwarder.Forwarder.UnmapAllDevicePorts(self.device)
       self.device.old_interface.RestartAdbdOnDevice()
     except Exception as e:
       logging.error('Exception when tearing down device %s', e)
 
     cmd = ('%s --device %s' %
            (self._tests['steps'][test_name]['cmd'],
             self.device_serial))
+
+    if self._options.collect_chartjson_data:
+      self._output_dir = tempfile.mkdtemp()
+      cmd = cmd + ' --output-dir=%s' % self._output_dir
+
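
With chartjson collection enabled the composed command picks up two extra
flags; everything before them comes straight from the steps file.
Illustratively (the base command, serial, and temp dir below are all made up):

  cmd = 'tools/perf/run_benchmark foo'   # hypothetical steps 'cmd' value
  cmd = '%s --device %s' % (cmd, '0123456789abcdef')
  cmd = cmd + ' --output-dir=%s' % '/tmp/tmpa1B2c3'
  # -> tools/perf/run_benchmark foo --device 0123456789abcdef \
  #        --output-dir=/tmp/tmpa1B2c3
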
     logging.info('%s : %s', test_name, cmd)
     start_time = datetime.datetime.now()
 
     timeout = 5400
     if self._options.no_timeout:
       timeout = None
     full_cmd = cmd
     if self._options.dry_run:
       full_cmd = 'echo %s' % cmd
 
     logfile = sys.stdout
     if self._options.single_step:
       # Just print a heart-beat so that the outer buildbot scripts won't timeout
       # without response.
       logfile = _HeartBeatLogger()
     cwd = os.path.abspath(constants.DIR_SOURCE_ROOT)
     if full_cmd.startswith('src/'):
       cwd = os.path.abspath(os.path.join(constants.DIR_SOURCE_ROOT, os.pardir))
     try:
       exit_code, output = cmd_helper.GetCmdStatusAndOutputWithTimeout(
           full_cmd, timeout, cwd=cwd, shell=True, logfile=logfile)
+      json_output = self._ReadChartjsonOutput()
     except cmd_helper.TimeoutError as e:
       exit_code = -1
       output = str(e)
+      json_output = ''
     finally:
+      self._CleanupOutputDirectory()
       if self._options.single_step:
         logfile.stop()
     end_time = datetime.datetime.now()
     if exit_code is None:
       exit_code = -1
     logging.info('%s : exit_code=%d in %d secs at %s',
                  test_name, exit_code, (end_time - start_time).seconds,
                  self.device_serial)
 
     if exit_code == 0:
(...skipping 12 matching lines...)
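
_HeartBeatLogger's body is outside this hunk; from its use here it must look
file-like to cmd_helper (write/flush) and expose stop(). A minimal sketch of
the pattern, assuming only that contract:

  import sys
  import threading

  class HeartBeatLogger(object):
    """Tees output to stdout and prints a periodic heart-beat so the
    outer buildbot scripts do not kill the step for inactivity."""

    def __init__(self, period=60):
      self._period = period
      self._timer = None
      self._beat()

    def _beat(self):
      sys.stdout.write('. ')
      sys.stdout.flush()
      self._timer = threading.Timer(self._period, self._beat)
      self._timer.start()

    def write(self, data):
      sys.stdout.write(data)

    def flush(self):
      sys.stdout.flush()

    def stop(self):
      if self._timer:
        self._timer.cancel()
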
     if test_name in self._flaky_tests:
       # The exit_code is used at the second stage when printing the
       # test output. If the test is flaky, force to "0" to get that step green
       # whilst still gathering data to the perf dashboards.
       # The result_type is used by the test_dispatcher to retry the test.
       exit_code = 0
 
     persisted_result = {
         'name': test_name,
         'output': output,
+        'chartjson': json_output,
         'exit_code': exit_code,
         'actual_exit_code': actual_exit_code,
         'result_type': result_type,
         'total_time': (end_time - start_time).seconds,
         'device': self.device_serial,
         'cmd': cmd,
     }
     self._SaveResult(persisted_result)
 
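
_SaveResult's body falls in the elided lines, but the readers in this file
(PrintTestOutput, OutputChartjson, _IsBetter) pin down its on-disk contract:
the dict above gets pickled to constants.PERF_OUTPUT_DIR/<test_name>,
presumably only when _IsBetter says the new run should replace the stored one.
A hedged sketch of that contract:

  def _SaveResult(self, result):
    # Sketch only; the real body is elided from this diff.
    if self._IsBetter(result):
      with open(os.path.join(constants.PERF_OUTPUT_DIR, result['name']),
                'w') as f:
        pickle.dump(result, f)
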
     return (output, result_type)
 
   def RunTest(self, test_name):
     """Run a perf test on the device.
 
     Args:
       test_name: String to use for logging the test result.
 
     Returns:
       A tuple of (TestRunResults, retry).
     """
     _, result_type = self._LaunchPerfTest(test_name)
     results = base_test_result.TestRunResults()
     results.AddResult(base_test_result.BaseTestResult(test_name, result_type))
     retry = None
     if not results.DidRunPass():
       retry = test_name
     return results, retry
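
RunTest's (results, retry) pair is what the dispatcher consumes: retry is None
on success, or the test name so it can be re-queued. Note the interplay with
the flaky-test handling above: exit_code is forced to 0 to keep the buildbot
step green, but result_type stays a failure, so this retry path still fires.
A caller would look roughly like (names are illustrative):

  runner = TestRunner(options, device, shard_index=0, max_shard=1,
                      tests=steps, flaky_tests=[])
  results, retry = runner.RunTest('page_cycler.foo')
  if retry:
    # test_dispatcher would push the name back onto its queue here.
    results, retry = runner.RunTest(retry)
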