Chromium Code Reviews

Unified Diff: build/android/pylib/instrumentation/test_runner.py

Issue 221823011: [Android] Change object types from AndroidCommands to DeviceUtils in build/android/. (Closed)
Base URL: https://chromium.googlesource.com/chromium/src.git@master
Patch Set: Address Frank's comments. Created 6 years, 8 months ago
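
The pattern in the hunks below is mechanical: call sites that held an AndroidCommands object (self.adb) now hold a DeviceUtils object (self.device); methods that DeviceUtils has not yet absorbed are reached through its old_interface attribute, and the device serial shows up via str(self.device) in log messages and dictionary keys. A minimal, self-contained sketch of that shape, using stand-in classes rather than the real pylib types:

# Stand-ins for android_commands.AndroidCommands and device_utils.DeviceUtils.
# Only the relationship between the two objects is meant to match the CL;
# the constructor and method bodies are assumptions for illustration.
class LegacyAndroidCommands(object):
  def __init__(self, serial):
    self._serial = serial

  def GetExternalStorage(self):
    return '/storage/emulated/0'  # placeholder; the real method asks the device


class DeviceUtilsSketch(object):
  def __init__(self, serial):
    self._serial = serial
    # Calls that have not been ported yet go through the wrapped legacy object.
    self.old_interface = LegacyAndroidCommands(serial)

  def __str__(self):
    # This is what makes str(self.device) usable as a log argument and dict key.
    return self._serial


device = DeviceUtilsSketch('0123456789abcdef')
print(device.old_interface.GetExternalStorage())  # legacy method, new handle
print(str(device))                                # '0123456789abcdef'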
--- a/build/android/pylib/instrumentation/test_runner.py
+++ b/build/android/pylib/instrumentation/test_runner.py
@@ -1,28 +1,29 @@
 # Copyright (c) 2012 The Chromium Authors. All rights reserved.
 # Use of this source code is governed by a BSD-style license that can be
 # found in the LICENSE file.
 
 """Class for running instrumentation tests on a single device."""
 
 import logging
 import os
 import re
 import sys
 import time
 
 from pylib import android_commands
 from pylib import constants
 from pylib import flag_changer
 from pylib import valgrind_tools
 from pylib.base import base_test_result
 from pylib.base import base_test_runner
+from pylib.device import adb_wrapper
 from pylib.instrumentation import json_perf_parser
 from pylib.instrumentation import test_result
 
 sys.path.append(os.path.join(sys.path[0],
                              os.pardir, os.pardir, 'build', 'util', 'lib',
                              'common'))
 import perf_tests_results_helper # pylint: disable=F0401
 
 
 _PERF_TEST_ANNOTATION = 'PerfTest'
@@ -74,87 +75,90 @@
     self.coverage_device_file = None
     self.coverage_dir = test_options.coverage_dir
     self.coverage_host_file = None
     self.options = test_options
     self.test_pkg = test_pkg
     # Use the correct command line file for the package under test.
     cmdline_file = [a.cmdline_file for a in constants.PACKAGE_INFO.itervalues()
                     if a.test_package == self.test_pkg.GetPackageName()]
     assert len(cmdline_file) < 2, 'Multiple packages have the same test package'
     if len(cmdline_file) and cmdline_file[0]:
-      self.flags = flag_changer.FlagChanger(self.adb, cmdline_file[0])
+      self.flags = flag_changer.FlagChanger(self.device, cmdline_file[0])
       if additional_flags:
         self.flags.AddFlags(additional_flags)
     else:
       self.flags = None
 
   #override
   def InstallTestPackage(self):
-    self.test_pkg.Install(self.adb)
+    self.test_pkg.Install(self.device)
 
   #override
   def PushDataDeps(self):
     # TODO(frankf): Implement a general approach for copying/installing
     # once across test runners.
     if TestRunner._DEVICE_HAS_TEST_FILES.get(self.device, False):
       logging.warning('Already copied test files to device %s, skipping.',
-                      self.device)
+                      str(self.device))
       return
 
     test_data = _GetDataFilesForTestSuite(self.test_pkg.GetApkName())
     if test_data:
       # Make sure SD card is ready.
-      self.adb.WaitForSdCardReady(20)
+      self.device.old_interface.WaitForSdCardReady(20)
       for p in test_data:
-        self.adb.PushIfNeeded(
+        self.device.old_interface.PushIfNeeded(
             os.path.join(constants.DIR_SOURCE_ROOT, p),
-            os.path.join(self.adb.GetExternalStorage(), p))
+            os.path.join(self.device.old_interface.GetExternalStorage(), p))
 
     # TODO(frankf): Specify test data in this file as opposed to passing
     # as command-line.
     for dest_host_pair in self.options.test_data:
       dst_src = dest_host_pair.split(':', 1)
       dst_layer = dst_src[0]
       host_src = dst_src[1]
       host_test_files_path = '%s/%s' % (constants.DIR_SOURCE_ROOT, host_src)
       if os.path.exists(host_test_files_path):
-        self.adb.PushIfNeeded(host_test_files_path, '%s/%s/%s' % (
-            self.adb.GetExternalStorage(), TestRunner._DEVICE_DATA_DIR,
-            dst_layer))
+        self.device.old_interface.PushIfNeeded(
+            host_test_files_path,
+            '%s/%s/%s' % (
+                self.device.old_interface.GetExternalStorage(),
+                TestRunner._DEVICE_DATA_DIR,
+                dst_layer))
     self.tool.CopyFiles()
-    TestRunner._DEVICE_HAS_TEST_FILES[self.device] = True
+    TestRunner._DEVICE_HAS_TEST_FILES[str(self.device)] = True
 
   def _GetInstrumentationArgs(self):
     ret = {}
     if self.options.wait_for_debugger:
       ret['debug'] = 'true'
     if self.coverage_dir:
       ret['coverage'] = 'true'
       ret['coverageFile'] = self.coverage_device_file
 
     return ret
 
   def _TakeScreenshot(self, test):
     """Takes a screenshot from the device."""
     screenshot_name = os.path.join(constants.SCREENSHOTS_DIR, '%s.png' % test)
     logging.info('Taking screenshot named %s', screenshot_name)
-    self.adb.TakeScreenshot(screenshot_name)
+    self.device.old_interface.TakeScreenshot(screenshot_name)
 
   def SetUp(self):
     """Sets up the test harness and device before all tests are run."""
     super(TestRunner, self).SetUp()
-    if not self.adb.IsRootEnabled():
+    if not self.device.old_interface.IsRootEnabled():
       logging.warning('Unable to enable java asserts for %s, non rooted device',
-                      self.device)
+                      str(self.device))
     else:
-      if self.adb.SetJavaAssertsEnabled(True):
-        self.adb.Reboot(full_reboot=False)
+      if self.device.old_interface.SetJavaAssertsEnabled(True):
+        self.device.old_interface.Reboot(full_reboot=False)
 
     # We give different default value to launch HTTP server based on shard index
     # because it may have race condition when multiple processes are trying to
     # launch lighttpd with same port at same time.
     self.LaunchTestHttpServer(
         os.path.join(constants.DIR_SOURCE_ROOT), self._lighttp_port)
     if self.flags:
       self.flags.AddFlags(['--disable-fre', '--enable-test-intents'])
 
   def TearDown(self):
@@ -171,100 +175,101 @@
     """
     self.SetupPerfMonitoringIfNeeded(test)
     self._SetupIndividualTestTimeoutScale(test)
     self.tool.SetupEnvironment()
 
     # Make sure the forwarder is still running.
     self._RestartHttpServerForwarderIfNecessary()
 
     if self.coverage_dir:
       coverage_basename = '%s.ec' % test
-      self.coverage_device_file = '%s/%s/%s' % (self.adb.GetExternalStorage(),
-                                                TestRunner._DEVICE_COVERAGE_DIR,
-                                                coverage_basename)
+      self.coverage_device_file = '%s/%s/%s' % (
+          self.device.old_interface.GetExternalStorage(),
+          TestRunner._DEVICE_COVERAGE_DIR, coverage_basename)
       self.coverage_host_file = os.path.join(
           self.coverage_dir, coverage_basename)
 
   def _IsPerfTest(self, test):
     """Determines whether a test is a performance test.
 
     Args:
       test: The name of the test to be checked.
 
     Returns:
       Whether the test is annotated as a performance test.
     """
     return _PERF_TEST_ANNOTATION in self.test_pkg.GetTestAnnotations(test)
 
   def SetupPerfMonitoringIfNeeded(self, test):
     """Sets up performance monitoring if the specified test requires it.
 
     Args:
       test: The name of the test to be run.
     """
     if not self._IsPerfTest(test):
       return
-    self.adb.Adb().SendCommand('shell rm ' +
-                               TestRunner._DEVICE_PERF_OUTPUT_SEARCH_PREFIX)
-    self.adb.StartMonitoringLogcat()
+    self.device.old_interface.Adb().SendCommand(
+        'shell rm ' + TestRunner._DEVICE_PERF_OUTPUT_SEARCH_PREFIX)
+    self.device.old_interface.StartMonitoringLogcat()
 
   def TestTeardown(self, test, raw_result):
     """Cleans up the test harness after running a particular test.
 
     Depending on the options of this TestRunner this might handle performance
     tracking. This method will only be called if the test passed.
 
     Args:
       test: The name of the test that was just run.
       raw_result: result for this test.
     """
 
     self.tool.CleanUpEnvironment()
 
     # The logic below relies on the test passing.
     if not raw_result or raw_result.GetStatusCode():
       return
 
     self.TearDownPerfMonitoring(test)
 
     if self.coverage_dir:
-      self.adb.Adb().Pull(self.coverage_device_file, self.coverage_host_file)
-      self.adb.RunShellCommand('rm -f %s' % self.coverage_device_file)
+      self.device.old_interface.Adb().Pull(
+          self.coverage_device_file, self.coverage_host_file)
+      self.device.old_interface.RunShellCommand(
+          'rm -f %s' % self.coverage_device_file)
 
   def TearDownPerfMonitoring(self, test):
     """Cleans up performance monitoring if the specified test required it.
 
     Args:
       test: The name of the test that was just run.
     Raises:
       Exception: if there's anything wrong with the perf data.
     """
     if not self._IsPerfTest(test):
       return
     raw_test_name = test.split('#')[1]
 
     # Wait and grab annotation data so we can figure out which traces to parse
-    regex = self.adb.WaitForLogMatch(re.compile('\*\*PERFANNOTATION\(' +
-                                                raw_test_name +
-                                                '\)\:(.*)'), None)
+    regex = self.device.old_interface.WaitForLogMatch(
+        re.compile('\*\*PERFANNOTATION\(' + raw_test_name + '\)\:(.*)'), None)
 
     # If the test is set to run on a specific device type only (IE: only
     # tablet or phone) and it is being run on the wrong device, the test
     # just quits and does not do anything. The java test harness will still
     # print the appropriate annotation for us, but will add --NORUN-- for
     # us so we know to ignore the results.
     # The --NORUN-- tag is managed by MainActivityTestBase.java
     if regex.group(1) != '--NORUN--':
 
       # Obtain the relevant perf data. The data is dumped to a
       # JSON formatted file.
-      json_string = self.adb.GetProtectedFileContents(
+      json_string = self.device.old_interface.GetProtectedFileContents(
          '/data/data/com.google.android.apps.chrome/files/PerfTestData.txt')
 
       if json_string:
        json_string = '\n'.join(json_string)
       else:
        raise Exception('Perf file does not exist or is empty')
 
       if self.options.save_perf_json:
        json_local_file = '/tmp/chromium-android-perf-json-' + raw_test_name
        with open(json_local_file, 'w') as f:
@@ -283,97 +288,99 @@
 
           # Process the performance data
           result = json_perf_parser.GetAverageRunInfoFromJSONString(json_string,
                                                                     perf_set[0])
           perf_tests_results_helper.PrintPerfResult(perf_set[1], perf_set[2],
                                                     [result['average']],
                                                     result['units'])
 
   def _SetupIndividualTestTimeoutScale(self, test):
     timeout_scale = self._GetIndividualTestTimeoutScale(test)
-    valgrind_tools.SetChromeTimeoutScale(self.adb, timeout_scale)
+    valgrind_tools.SetChromeTimeoutScale(self.device, timeout_scale)
 
   def _GetIndividualTestTimeoutScale(self, test):
     """Returns the timeout scale for the given |test|."""
     annotations = self.test_pkg.GetTestAnnotations(test)
     timeout_scale = 1
     if 'TimeoutScale' in annotations:
       for annotation in annotations:
         scale_match = re.match('TimeoutScale:([0-9]+)', annotation)
         if scale_match:
           timeout_scale = int(scale_match.group(1))
     if self.options.wait_for_debugger:
       timeout_scale *= 100
     return timeout_scale
 
   def _GetIndividualTestTimeoutSecs(self, test):
     """Returns the timeout in seconds for the given |test|."""
     annotations = self.test_pkg.GetTestAnnotations(test)
     if 'Manual' in annotations:
       return 600 * 60
     if 'External' in annotations:
       return 10 * 60
     if 'LargeTest' in annotations or _PERF_TEST_ANNOTATION in annotations:
       return 5 * 60
     if 'MediumTest' in annotations:
       return 3 * 60
     return 1 * 60
 
   def _RunTest(self, test, timeout):
     try:
-      return self.adb.RunInstrumentationTest(
+      return self.device.old_interface.RunInstrumentationTest(
           test, self.test_pkg.GetPackageName(),
           self._GetInstrumentationArgs(), timeout)
-    except android_commands.errors.WaitForResponseTimedOutError:
+    except adb_wrapper.CommandTimeoutError:
       logging.info('Ran the test with timeout of %ds.' % timeout)
       raise
 
   #override
   def RunTest(self, test):
     raw_result = None
     start_date_ms = None
     results = base_test_result.TestRunResults()
     timeout = (self._GetIndividualTestTimeoutSecs(test) *
                self._GetIndividualTestTimeoutScale(test) *
                self.tool.GetTimeoutScale())
     try:
       self.TestSetup(test)
       start_date_ms = int(time.time()) * 1000
       raw_result = self._RunTest(test, timeout)
       duration_ms = int(time.time()) * 1000 - start_date_ms
       status_code = raw_result.GetStatusCode()
       if status_code:
         if self.options.screenshot_failures:
           self._TakeScreenshot(test)
         log = raw_result.GetFailureReason()
         if not log:
           log = 'No information.'
         result_type = base_test_result.ResultType.FAIL
-        package = self.adb.DismissCrashDialogIfNeeded()
+        package = self.device.old_interface.DismissCrashDialogIfNeeded()
         # Assume test package convention of ".test" suffix
         if package and package in self.test_pkg.GetPackageName():
           result_type = base_test_result.ResultType.CRASH
         result = test_result.InstrumentationTestResult(
             test, result_type, start_date_ms, duration_ms, log=log)
       else:
         result = test_result.InstrumentationTestResult(
             test, base_test_result.ResultType.PASS, start_date_ms, duration_ms)
       results.AddResult(result)
     # Catch exceptions thrown by StartInstrumentation().
     # See ../../third_party/android/testrunner/adb_interface.py
-    except (android_commands.errors.WaitForResponseTimedOutError,
-            android_commands.errors.DeviceUnresponsiveError,
+    except (adb_wrapper.CommandTimeoutError,
+            adb_wrapper.DeviceUnreachableError,
+            # TODO(jbudorick) Remove this once the underlying implementations
+            # for the above are switched or wrapped.
             android_commands.errors.InstrumentationError), e:
       if start_date_ms:
         duration_ms = int(time.time()) * 1000 - start_date_ms
       else:
         start_date_ms = int(time.time()) * 1000
         duration_ms = 0
       message = str(e)
       if not message:
         message = 'No information.'
       results.AddResult(test_result.InstrumentationTestResult(
           test, base_test_result.ResultType.CRASH, start_date_ms, duration_ms,
           log=message))
       raw_result = None
     self.TestTeardown(test, raw_result)
     return (results, None if results.DidRunPass() else test)
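
A note on the exception change in _RunTest() and RunTest() above: during the transition the except clauses mix the new adb_wrapper exception types with the legacy android_commands.errors.InstrumentationError (see the TODO in the diff). A small, self-contained sketch of that catch-both pattern, again with stand-in exception classes rather than the real pylib types:

# Stand-in exception types; illustrative only.
class CommandTimeoutError(Exception):         # role of adb_wrapper.CommandTimeoutError
  pass


class LegacyInstrumentationError(Exception):  # role of android_commands.errors.InstrumentationError
  pass


def run_with_crash_reporting(run_test):
  try:
    return run_test()
  except (CommandTimeoutError, LegacyInstrumentationError) as e:
    # Old and new error types are caught in one clause so callers can be
    # migrated one at a time without losing crash reporting.
    return 'CRASH: %s' % (str(e) or 'No information.')


def _raise_timeout():
  raise CommandTimeoutError('adb timed out')

print(run_with_crash_reporting(lambda: 'PASS'))  # -> PASS
print(run_with_crash_reporting(_raise_timeout))  # -> CRASH: adb timed out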
