Chromium Code Reviews

Unified Diff: build/android/pylib/instrumentation/test_runner.py

Issue 896503002: [Android] Add LogcatMonitor. (Closed) Base URL: https://chromium.googlesource.com/chromium/src.git@master
Patch Set: Created 5 years, 10 months ago
 # Copyright (c) 2012 The Chromium Authors. All rights reserved.
 # Use of this source code is governed by a BSD-style license that can be
 # found in the LICENSE file.

 """Class for running instrumentation tests on a single device."""

 import logging
 import os
 import re
 import sys
(...skipping 33 matching lines...)
     Args:
       test_options: An InstrumentationOptions object.
       device: Attached android device.
       shard_index: Shard index.
       test_pkg: A TestPackage object.
       additional_flags: A list of additional flags to add to the command line.
     """
     super(TestRunner, self).__init__(device, test_options.tool,
                                      test_options.cleanup_test_files)
     self._lighttp_port = constants.LIGHTTPD_RANDOM_PORT_FIRST + shard_index
+    self._logcat_monitor = None

     self.coverage_device_file = None
     self.coverage_dir = test_options.coverage_dir
     self.coverage_host_file = None
     self.options = test_options
     self.test_pkg = test_pkg
     # Use the correct command line file for the package under test.
     cmdline_file = [a.cmdline_file for a in constants.PACKAGE_INFO.itervalues()
                     if a.test_package == self.test_pkg.GetPackageName()]
     assert len(cmdline_file) < 2, 'Multiple packages have the same test package'
(...skipping 103 matching lines...)
     return _PERF_TEST_ANNOTATION in self.test_pkg.GetTestAnnotations(test)

   def SetupPerfMonitoringIfNeeded(self, test):
     """Sets up performance monitoring if the specified test requires it.

     Args:
       test: The name of the test to be run.
     """
     if not self._IsPerfTest(test):
       return
-    self.device.old_interface.Adb().SendCommand(
-        'shell rm ' + TestRunner._DEVICE_PERF_OUTPUT_SEARCH_PREFIX)
-    self.device.old_interface.StartMonitoringLogcat()
+    self.device.RunShellCommand(
+        ['rm', TestRunner._DEVICE_PERF_OUTPUT_SEARCH_PREFIX])
+    self._logcat_monitor = self.device.GetLogcatMonitor()
+    self._logcat_monitor.Start()

   def TestTeardown(self, test, result):
     """Cleans up the test harness after running a particular test.

     Depending on the options of this TestRunner this might handle performance
     tracking. This method will only be called if the test passed.

     Args:
       test: The name of the test that was just run.
       result: result for this test.
(...skipping 22 matching lines...)
     Args:
       test: The name of the test that was just run.
     Raises:
       Exception: if there's anything wrong with the perf data.
     """
     if not self._IsPerfTest(test):
       return
     raw_test_name = test.split('#')[1]

     # Wait and grab annotation data so we can figure out which traces to parse
-    regex = self.device.old_interface.WaitForLogMatch(
-        re.compile(r'\*\*PERFANNOTATION\(' + raw_test_name + r'\)\:(.*)'),
-        None)
+    regex = self._logcat_monitor.WaitFor(
+        re.compile(r'\*\*PERFANNOTATION\(' + raw_test_name + r'\)\:(.*)'))

     # If the test is set to run on a specific device type only (IE: only
     # tablet or phone) and it is being run on the wrong device, the test
     # just quits and does not do anything. The java test harness will still
     # print the appropriate annotation for us, but will add --NORUN-- for
     # us so we know to ignore the results.
     # The --NORUN-- tag is managed by MainActivityTestBase.java
     if regex.group(1) != '--NORUN--':

       # Obtain the relevant perf data. The data is dumped to a
(...skipping 119 matching lines...)
     except device_errors.CommandTimeoutError as e:
       results.AddResult(test_result.InstrumentationTestResult(
           test, base_test_result.ResultType.TIMEOUT, start_ms, duration_ms,
           log=str(e) or 'No information'))
     except device_errors.DeviceUnreachableError as e:
       results.AddResult(test_result.InstrumentationTestResult(
           test, base_test_result.ResultType.CRASH, start_ms, duration_ms,
           log=str(e) or 'No information'))
     self.TestTeardown(test, results)
     return (results, None if results.DidRunPass() else test)
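
For context on the new API used above, here is a minimal sketch of the LogcatMonitor flow this patch adopts. It assumes device is a DeviceUtils instance and raw_test_name is a placeholder test name; only GetLogcatMonitor(), Start(), and WaitFor() are taken from the diff itself, and the trailing Stop() call is an assumption for symmetry with Start(), since the cleanup path is outside the hunks shown.

  import re

  # 'device' (a DeviceUtils instance) is assumed to exist; 'raw_test_name'
  # is a hypothetical test name used only for illustration.
  raw_test_name = 'testPerfAnnotation'

  # Acquire a monitor bound to this device's logcat stream and start
  # capturing output in the background.
  logcat_monitor = device.GetLogcatMonitor()
  logcat_monitor.Start()

  # ... run the instrumentation test here ...

  # Block until a matching logcat line appears. WaitFor returns the re
  # match object, so capture groups are available directly.
  match = logcat_monitor.WaitFor(
      re.compile(r'\*\*PERFANNOTATION\(' + raw_test_name + r'\)\:(.*)'))
  if match.group(1) != '--NORUN--':
    perf_data = match.group(1)

  # Stop() is assumed here to mirror Start(); the patch's teardown happens
  # outside the hunks shown above.
  logcat_monitor.Stop()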
