Chromium Code Reviews

Side by Side Diff: build/android/pylib/instrumentation/test_runner.py

Issue 2101243005: Add a snapshot of flutter/engine/src/build to our sdk (Closed) Base URL: git@github.com:dart-lang/sdk.git@master
Patch Set: add README.dart Created 4 years, 5 months ago
# Copyright (c) 2012 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.

"""Class for running instrumentation tests on a single device."""

import logging
import os
import re
import sys
import time

from pylib import constants
from pylib import flag_changer
from pylib import valgrind_tools
from pylib.base import base_test_result
from pylib.base import base_test_runner
from pylib.device import device_errors
from pylib.instrumentation import instrumentation_test_instance
from pylib.instrumentation import json_perf_parser
from pylib.instrumentation import test_result
from pylib.local.device import local_device_instrumentation_test_run

sys.path.append(os.path.join(constants.DIR_SOURCE_ROOT, 'build', 'util', 'lib',
                             'common'))
import perf_tests_results_helper  # pylint: disable=F0401


_PERF_TEST_ANNOTATION = 'PerfTest'

class TestRunner(base_test_runner.BaseTestRunner):
  """Responsible for running a series of tests connected to a single device."""

  _DEVICE_COVERAGE_DIR = 'chrome/test/coverage'
  _HOSTMACHINE_PERF_OUTPUT_FILE = '/tmp/chrome-profile'
  _DEVICE_PERF_OUTPUT_SEARCH_PREFIX = (constants.DEVICE_PERF_OUTPUT_DIR +
                                       '/chrome-profile*')

  def __init__(self, test_options, device, shard_index, test_pkg,
               additional_flags=None):
    """Create a new TestRunner.

    Args:
      test_options: An InstrumentationOptions object.
      device: Attached android device.
      shard_index: Shard index.
      test_pkg: A TestPackage object.
      additional_flags: A list of additional flags to add to the command line.
    """
    super(TestRunner, self).__init__(device, test_options.tool)
    self._lighttp_port = constants.LIGHTTPD_RANDOM_PORT_FIRST + shard_index
    self._logcat_monitor = None

    self.coverage_device_file = None
    self.coverage_dir = test_options.coverage_dir
    self.coverage_host_file = None
    self.options = test_options
    self.test_pkg = test_pkg
    # Use the correct command line file for the package under test.
    cmdline_file = [a.cmdline_file for a in constants.PACKAGE_INFO.itervalues()
                    if a.test_package == self.test_pkg.GetPackageName()]
    assert len(cmdline_file) < 2, 'Multiple packages have the same test package'
    if cmdline_file and cmdline_file[0]:
      self.flags = flag_changer.FlagChanger(self.device, cmdline_file[0])
      if additional_flags:
        self.flags.AddFlags(additional_flags)
    else:
      self.flags = None

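# Reviewer note: the command-line-file lookup above filters
# constants.PACKAGE_INFO down to the entry whose test_package matches the
# package under test and asserts that at most one entry matches. A minimal
# stand-alone sketch of the same pattern; the mapping, package names, and
# paths below are illustrative only, not the real PACKAGE_INFO contents.

import collections

_PkgInfo = collections.namedtuple('_PkgInfo', ['test_package', 'cmdline_file'])

_EXAMPLE_PACKAGE_INFO = {
    'chrome': _PkgInfo('com.example.chrome.tests',
                       '/data/local/chrome-command-line'),
    'shell': _PkgInfo('com.example.shell.tests',
                      '/data/local/tmp/shell-command-line'),
}

def _CmdlineFileForTestPackage(test_package):
  matches = [p.cmdline_file for p in _EXAMPLE_PACKAGE_INFO.values()
             if p.test_package == test_package]
  assert len(matches) < 2, 'Multiple packages have the same test package'
  return matches[0] if matches else None

# _CmdlineFileForTestPackage('com.example.chrome.tests')
# -> '/data/local/chrome-command-line'
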
  #override
  def InstallTestPackage(self):
    self.test_pkg.Install(self.device)

  def _GetInstrumentationArgs(self):
    ret = {}
    if self.options.wait_for_debugger:
      ret['debug'] = 'true'
    if self.coverage_dir:
      ret['coverage'] = 'true'
      ret['coverageFile'] = self.coverage_device_file

    return ret

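# Reviewer note: the dict built by _GetInstrumentationArgs() is handed to
# device.StartInstrumentation() as instrumentation extras, which surface on
# the device as the -e <key> <value> arguments of `am instrument`. A rough
# sketch of that translation; the component name and the -w/-r flags are
# illustrative, not necessarily the exact command pylib builds.

def _BuildAmInstrumentCommand(component, extras):
  cmd = ['am', 'instrument', '-w', '-r']
  for key, value in sorted(extras.items()):
    cmd += ['-e', key, value]
  return cmd + [component]

# _BuildAmInstrumentCommand(
#     'com.example.tests/android.test.InstrumentationTestRunner',
#     {'coverage': 'true', 'coverageFile': '/sdcard/chrome/test/coverage/t.ec'})
# -> ['am', 'instrument', '-w', '-r',
#     '-e', 'coverage', 'true',
#     '-e', 'coverageFile', '/sdcard/chrome/test/coverage/t.ec',
#     'com.example.tests/android.test.InstrumentationTestRunner']
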
  def _TakeScreenshot(self, test):
    """Takes a screenshot from the device."""
    screenshot_name = os.path.join(constants.SCREENSHOTS_DIR, '%s.png' % test)
    logging.info('Taking screenshot named %s', screenshot_name)
    self.device.TakeScreenshot(screenshot_name)

  def SetUp(self):
    """Sets up the test harness and device before all tests are run."""
    super(TestRunner, self).SetUp()
    if not self.device.HasRoot():
      logging.warning('Unable to enable java asserts for %s: non-rooted device',
                      str(self.device))
    else:
      if self.device.SetJavaAsserts(self.options.set_asserts):
        # TODO(jbudorick) How to best do shell restart after the
        # android_commands refactor?
        self.device.RunShellCommand('stop')
        self.device.RunShellCommand('start')
        self.device.WaitUntilFullyBooted()

    # Launch the HTTP server on a shard-specific port: multiple processes
    # trying to launch lighttpd on the same port at the same time would race.
    self.LaunchTestHttpServer(
        os.path.join(constants.DIR_SOURCE_ROOT), self._lighttp_port)
    if self.flags:
      self.flags.AddFlags(['--disable-fre', '--enable-test-intents'])
      if self.options.device_flags:
        with open(self.options.device_flags) as device_flags_file:
          stripped_flags = (l.strip() for l in device_flags_file)
          self.flags.AddFlags([flag for flag in stripped_flags if flag])

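# Reviewer note: each shard derives its lighttpd port from its index in
# __init__, so concurrent shards never contend for the same port. With a
# hypothetical base port of 8001 (the real value comes from constants):

_EXAMPLE_PORT_FIRST = 8001
# [_EXAMPLE_PORT_FIRST + shard_index for shard_index in range(4)]
# -> [8001, 8002, 8003, 8004]: one distinct port per shard.
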
  def TearDown(self):
    """Cleans up the test harness and saves outstanding data from test run."""
    if self.flags:
      self.flags.Restore()
    super(TestRunner, self).TearDown()

  def TestSetup(self, test):
    """Sets up the test harness for running a particular test.

    Args:
      test: The name of the test that will be run.
    """
    self.SetupPerfMonitoringIfNeeded(test)
    self._SetupIndividualTestTimeoutScale(test)
    self.tool.SetupEnvironment()

    if self.flags and self._IsFreTest(test):
      self.flags.RemoveFlags(['--disable-fre'])

    # Make sure the forwarder is still running.
    self._RestartHttpServerForwarderIfNecessary()

    if self.coverage_dir:
      coverage_basename = '%s.ec' % test
      self.coverage_device_file = '%s/%s/%s' % (
          self.device.GetExternalStoragePath(),
          TestRunner._DEVICE_COVERAGE_DIR, coverage_basename)
      self.coverage_host_file = os.path.join(
          self.coverage_dir, coverage_basename)

  def _IsFreTest(self, test):
    """Determines whether a test is a first run experience test.

    Args:
      test: The name of the test to be checked.

    Returns:
      Whether the feature being tested is FirstRunExperience.
    """
    annotations = self.test_pkg.GetTestAnnotations(test)
    return 'FirstRunExperience' == annotations.get('Feature', None)

  def _IsPerfTest(self, test):
    """Determines whether a test is a performance test.

    Args:
      test: The name of the test to be checked.

    Returns:
      Whether the test is annotated as a performance test.
    """
    return _PERF_TEST_ANNOTATION in self.test_pkg.GetTestAnnotations(test)

  def SetupPerfMonitoringIfNeeded(self, test):
    """Sets up performance monitoring if the specified test requires it.

    Args:
      test: The name of the test to be run.
    """
    if not self._IsPerfTest(test):
      return
    self.device.RunShellCommand(
        ['rm', TestRunner._DEVICE_PERF_OUTPUT_SEARCH_PREFIX])
    self._logcat_monitor = self.device.GetLogcatMonitor()
    self._logcat_monitor.Start()

  def TestTeardown(self, test, result):
    """Cleans up the test harness after running a particular test.

    Depending on the options of this TestRunner this might handle performance
    tracking. This method will only be called if the test passed.

    Args:
      test: The name of the test that was just run.
      result: The result of this test run.
    """

    self.tool.CleanUpEnvironment()

    # The logic below relies on the test passing.
    if not result or not result.DidRunPass():
      return

    self.TearDownPerfMonitoring(test)

    if self.flags and self._IsFreTest(test):
      self.flags.AddFlags(['--disable-fre'])

    if self.coverage_dir:
      self.device.PullFile(
          self.coverage_device_file, self.coverage_host_file)
      self.device.RunShellCommand(
          'rm -f %s' % self.coverage_device_file)

  def TearDownPerfMonitoring(self, test):
    """Cleans up performance monitoring if the specified test required it.

    Args:
      test: The name of the test that was just run.
    Raises:
      Exception: if there's anything wrong with the perf data.
    """
    if not self._IsPerfTest(test):
      return
    raw_test_name = test.split('#')[1]

    # Wait for and grab the annotation data so we can figure out which traces
    # to parse.
    regex = self._logcat_monitor.WaitFor(
        re.compile(r'\*\*PERFANNOTATION\(' + raw_test_name + r'\)\:(.*)'))

    # If the test is restricted to a specific device type (e.g. tablet-only
    # or phone-only) and it is being run on the wrong device, the test just
    # quits and does not do anything. The java test harness will still print
    # the appropriate annotation for us, but will add --NORUN-- so we know to
    # ignore the results. The --NORUN-- tag is managed by
    # ChromeTabbedActivityTestBase.java.
    if regex.group(1) != '--NORUN--':

      # Obtain the relevant perf data. The data is dumped to a
      # JSON formatted file.
      json_string = self.device.ReadFile(
          '/data/data/com.google.android.apps.chrome/files/PerfTestData.txt',
          as_root=True)

      if not json_string:
        raise Exception('Perf file is empty')

      if self.options.save_perf_json:
        json_local_file = '/tmp/chromium-android-perf-json-' + raw_test_name
        with open(json_local_file, 'w') as f:
          f.write(json_string)
        logging.info('Saving Perf UI JSON from test %s to %s',
                     test, json_local_file)

      raw_perf_data = regex.group(1).split(';')

      for raw_perf_set in raw_perf_data:
        if raw_perf_set:
          perf_set = raw_perf_set.split(',')
          if len(perf_set) != 3:
            raise Exception('Unexpected number of tokens in perf annotation '
                            'string: ' + raw_perf_set)

          # Process the performance data.
          result = json_perf_parser.GetAverageRunInfoFromJSONString(
              json_string, perf_set[0])
          perf_tests_results_helper.PrintPerfResult(perf_set[1], perf_set[2],
                                                    [result['average']],
                                                    result['units'])

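# Reviewer note: the PERFANNOTATION payload parsed above is a semicolon-
# separated list of comma-separated triples: the series name to look up in
# the JSON perf dump, the perf graph name, and the trace name. A stand-alone
# sketch of that parsing, with a made-up payload:

def _ParsePerfAnnotation(payload):
  triples = []
  for raw_perf_set in payload.split(';'):
    if not raw_perf_set:
      continue
    perf_set = raw_perf_set.split(',')
    if len(perf_set) != 3:
      raise Exception('Unexpected number of tokens in perf annotation '
                      'string: ' + raw_perf_set)
    triples.append(tuple(perf_set))
  return triples

# _ParsePerfAnnotation('startup_time,startup,cold;fps,smoothness,scroll')
# -> [('startup_time', 'startup', 'cold'), ('fps', 'smoothness', 'scroll')]
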
  def _SetupIndividualTestTimeoutScale(self, test):
    timeout_scale = self._GetIndividualTestTimeoutScale(test)
    valgrind_tools.SetChromeTimeoutScale(self.device, timeout_scale)

  def _GetIndividualTestTimeoutScale(self, test):
    """Returns the timeout scale for the given |test|."""
    annotations = self.test_pkg.GetTestAnnotations(test)
    timeout_scale = 1
    if 'TimeoutScale' in annotations:
      try:
        timeout_scale = int(annotations['TimeoutScale'])
      except ValueError:
        logging.warning('Non-integer value of TimeoutScale ignored. (%s)',
                        annotations['TimeoutScale'])
    if self.options.wait_for_debugger:
      timeout_scale *= 100
    return timeout_scale

  def _GetIndividualTestTimeoutSecs(self, test):
    """Returns the timeout in seconds for the given |test|."""
    annotations = self.test_pkg.GetTestAnnotations(test)
    if 'Manual' in annotations:
      return 10 * 60 * 60
    if 'IntegrationTest' in annotations:
      return 30 * 60
    if 'External' in annotations:
      return 10 * 60
    if 'EnormousTest' in annotations:
      return 10 * 60
    if 'LargeTest' in annotations or _PERF_TEST_ANNOTATION in annotations:
      return 5 * 60
    if 'MediumTest' in annotations:
      return 3 * 60
    if 'SmallTest' in annotations:
      return 1 * 60

    logging.warning("Test size not found in annotations for test '%s', using "
                    "1 minute for timeout.", test)
    return 1 * 60

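# Reviewer note: RunTest() multiplies this base timeout by the annotation
# scale and the tool's scale, so a worked example for a LargeTest annotated
# with TimeoutScale(2) under a tool whose scale is 1 would be:
#
#   5 * 60 * 2 * 1 = 600 seconds
#
# and the same test run with --wait-for-debugger gets 100x that, since
# _GetIndividualTestTimeoutScale() multiplies the scale by 100.
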
  def _RunTest(self, test, timeout):
    """Runs a single instrumentation test.

    Args:
      test: Test class/method.
      timeout: Timeout time in seconds.

    Returns:
      The raw output of am instrument as a list of lines.
    """
    extras = self._GetInstrumentationArgs()
    extras['class'] = test
    return self.device.StartInstrumentation(
        '%s/%s' % (self.test_pkg.GetPackageName(), self.options.test_runner),
        raw=True, extras=extras, timeout=timeout, retries=3)

  def _GenerateTestResult(self, test, instr_result_code, instr_result_bundle,
                          statuses, start_ms, duration_ms):
    results = instrumentation_test_instance.GenerateTestResults(
        instr_result_code, instr_result_bundle, statuses, start_ms, duration_ms)
    for r in results:
      if r.GetName() == test:
        return r
    logging.error('Could not find result for test: %s', test)
    return test_result.InstrumentationTestResult(
        test, base_test_result.ResultType.UNKNOWN, start_ms, duration_ms)

  #override
  def RunTest(self, test):
    results = base_test_result.TestRunResults()
    timeout = (self._GetIndividualTestTimeoutSecs(test) *
               self._GetIndividualTestTimeoutScale(test) *
               self.tool.GetTimeoutScale())

    start_ms = 0
    duration_ms = 0
    try:
      self.TestSetup(test)

      try:
        self.device.GoHome()
      except device_errors.CommandTimeoutError:
        logging.exception('Failed to focus the launcher.')

      time_ms = lambda: int(time.time() * 1000)
      start_ms = time_ms()
      raw_output = self._RunTest(test, timeout)
      duration_ms = time_ms() - start_ms

      # Parse the test output.
      result_code, result_bundle, statuses = (
          instrumentation_test_instance.ParseAmInstrumentRawOutput(raw_output))
      result = self._GenerateTestResult(
          test, result_code, result_bundle, statuses, start_ms, duration_ms)
      if local_device_instrumentation_test_run.DidPackageCrashOnDevice(
          self.test_pkg.GetPackageName(), self.device):
        result.SetType(base_test_result.ResultType.CRASH)
      results.AddResult(result)
    except device_errors.CommandTimeoutError as e:
      results.AddResult(test_result.InstrumentationTestResult(
          test, base_test_result.ResultType.TIMEOUT, start_ms, duration_ms,
          log=str(e) or 'No information'))
    except device_errors.DeviceUnreachableError as e:
      results.AddResult(test_result.InstrumentationTestResult(
          test, base_test_result.ResultType.CRASH, start_ms, duration_ms,
          log=str(e) or 'No information'))
    self.TestTeardown(test, results)
    return (results, None if results.DidRunPass() else test)
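
# Reviewer note: putting the pieces together, a driver would use the runner
# roughly like this. test_options, device, and test_pkg stand in for real
# InstrumentationOptions, DeviceUtils, and TestPackage objects, and the test
# name is made up; this is a sketch, not code from this patch.
#
#   runner = TestRunner(test_options, device, shard_index=0, test_pkg=test_pkg)
#   runner.InstallTestPackage()
#   runner.SetUp()
#   try:
#     results, failed = runner.RunTest('com.example.FooTest#testBar')
#   finally:
#     runner.TearDown()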