OLD | NEW |
1 # Copyright (c) 2012 The Chromium Authors. All rights reserved. | 1 # Copyright (c) 2012 The Chromium Authors. All rights reserved. |
2 # Use of this source code is governed by a BSD-style license that can be | 2 # Use of this source code is governed by a BSD-style license that can be |
3 # found in the LICENSE file. | 3 # found in the LICENSE file. |
4 | 4 |
5 """Class for running instrumentation tests on a single device.""" | 5 """Class for running instrumentation tests on a single device.""" |
6 | 6 |
7 import logging | 7 import logging |
8 import os | 8 import os |
9 import re | 9 import re |
10 import sys | 10 import sys |
11 import time | 11 import time |
12 | 12 |
13 | 13 |
14 sys.path.append(os.path.join(sys.path[0], | 14 sys.path.append(os.path.join(sys.path[0], |
15 os.pardir, os.pardir, 'build', 'util', 'lib', | 15 os.pardir, os.pardir, 'build', 'util', 'lib', |
16 'common')) | 16 'common')) |
17 import perf_tests_results_helper | 17 import perf_tests_results_helper |
18 | 18 |
19 from pylib import android_commands | 19 from pylib import android_commands |
20 from pylib import constants | 20 from pylib import constants |
21 from pylib import flag_changer | 21 from pylib import flag_changer |
22 from pylib import valgrind_tools | 22 from pylib import valgrind_tools |
23 from pylib.base import base_test_result | 23 from pylib.base import base_test_result |
24 from pylib.base import base_test_runner | 24 from pylib.base import base_test_runner |
25 from pylib.instrumentation import json_perf_parser | 25 from pylib.instrumentation import json_perf_parser |
26 | 26 from pylib.instrumentation import test_result |
27 import test_result | |
28 | 27 |
29 | 28 |
30 _PERF_TEST_ANNOTATION = 'PerfTest' | 29 _PERF_TEST_ANNOTATION = 'PerfTest' |
31 | 30 |
32 | 31 |
33 def _GetDataFilesForTestSuite(suite_basename): | 32 def _GetDataFilesForTestSuite(suite_basename): |
34 """Returns a list of data files/dirs needed by the test suite. | 33 """Returns a list of data files/dirs needed by the test suite. |
35 | 34 |
36 Args: | 35 Args: |
37 suite_basename: The test suite basename for which to return file paths. | 36 suite_basename: The test suite basename for which to return file paths. |
(...skipping 28 matching lines...) |
66 device: Attached android device. | 65 device: Attached android device. |
67 shard_index: Shard index. | 66 shard_index: Shard index. |
68 test_pkg: A TestPackage object. | 67 test_pkg: A TestPackage object. |
69 additional_flags: A list of additional flags to add to the command line. | 68 additional_flags: A list of additional flags to add to the command line. |
70 """ | 69 """ |
71 super(TestRunner, self).__init__(device, test_options.tool, | 70 super(TestRunner, self).__init__(device, test_options.tool, |
72 test_options.push_deps, | 71 test_options.push_deps, |
73 test_options.cleanup_test_files) | 72 test_options.cleanup_test_files) |
74 self._lighttp_port = constants.LIGHTTPD_RANDOM_PORT_FIRST + shard_index | 73 self._lighttp_port = constants.LIGHTTPD_RANDOM_PORT_FIRST + shard_index |
75 | 74 |
| 75 self.coverage_device_file = None |
| 76 self.coverage_dir = test_options.coverage_dir |
| 77 self.coverage_host_file = None |
76 self.options = test_options | 78 self.options = test_options |
77 self.test_pkg = test_pkg | 79 self.test_pkg = test_pkg |
78 self.coverage_dir = test_options.coverage_dir | |
79 # Use the correct command line file for the package under test. | 80 # Use the correct command line file for the package under test. |
80 cmdline_file = [a.cmdline_file for a in constants.PACKAGE_INFO.itervalues() | 81 cmdline_file = [a.cmdline_file for a in constants.PACKAGE_INFO.itervalues() |
81 if a.test_package == self.test_pkg.GetPackageName()] | 82 if a.test_package == self.test_pkg.GetPackageName()] |
82 assert len(cmdline_file) < 2, 'Multiple packages have the same test package' | 83 assert len(cmdline_file) < 2, 'Multiple packages have the same test package' |
83 if len(cmdline_file) and cmdline_file[0]: | 84 if len(cmdline_file) and cmdline_file[0]: |
84 self.flags = flag_changer.FlagChanger(self.adb, cmdline_file[0]) | 85 self.flags = flag_changer.FlagChanger(self.adb, cmdline_file[0]) |
85 if additional_flags: | 86 if additional_flags: |
86 self.flags.AddFlags(additional_flags) | 87 self.flags.AddFlags(additional_flags) |
87 else: | 88 else: |
88 self.flags = None | 89 self.flags = None |
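
The constructor picks the command-line file by scanning constants.PACKAGE_INFO for the single entry whose test_package matches the APK under test; the assert guarantees at most one match, and self.flags stays None when there is none. A minimal, runnable sketch of that lookup follows, with hypothetical package names and file paths standing in for the real PACKAGE_INFO entries.

# Sketch, not part of the change above: how cmdline_file is selected. The
# entries below are hypothetical placeholders, not the real
# constants.PACKAGE_INFO.
import collections

PackageInfo = collections.namedtuple('PackageInfo',
                                     ['test_package', 'cmdline_file'])

PACKAGE_INFO = {
    'example_app': PackageInfo('org.example.app.tests',
                               '/data/local/example-command-line'),
    'example_shell': PackageInfo('org.example.shell.tests',
                                 '/data/local/tmp/example-shell-command-line'),
}


def GetCmdlineFile(test_package_name):
  """Returns the command-line file for the package under test, or None."""
  matches = [p.cmdline_file for p in PACKAGE_INFO.values()
             if p.test_package == test_package_name]
  assert len(matches) < 2, 'Multiple packages have the same test package'
  return matches[0] if matches else None


assert (GetCmdlineFile('org.example.app.tests') ==
        '/data/local/example-command-line')
assert GetCmdlineFile('org.example.unknown.tests') is None
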
(...skipping 16 matching lines...) |
105 # Make sure SD card is ready. | 106 # Make sure SD card is ready. |
106 self.adb.WaitForSdCardReady(20) | 107 self.adb.WaitForSdCardReady(20) |
107 for p in test_data: | 108 for p in test_data: |
108 self.adb.PushIfNeeded( | 109 self.adb.PushIfNeeded( |
109 os.path.join(constants.DIR_SOURCE_ROOT, p), | 110 os.path.join(constants.DIR_SOURCE_ROOT, p), |
110 os.path.join(self.adb.GetExternalStorage(), p)) | 111 os.path.join(self.adb.GetExternalStorage(), p)) |
111 | 112 |
112 # TODO(frankf): Specify test data in this file as opposed to passing | 113 # TODO(frankf): Specify test data in this file as opposed to passing |
113 # as command-line. | 114 # as command-line. |
114 for dest_host_pair in self.options.test_data: | 115 for dest_host_pair in self.options.test_data: |
115 dst_src = dest_host_pair.split(':',1) | 116 dst_src = dest_host_pair.split(':', 1) |
116 dst_layer = dst_src[0] | 117 dst_layer = dst_src[0] |
117 host_src = dst_src[1] | 118 host_src = dst_src[1] |
118 host_test_files_path = '%s/%s' % (constants.DIR_SOURCE_ROOT, host_src) | 119 host_test_files_path = '%s/%s' % (constants.DIR_SOURCE_ROOT, host_src) |
119 if os.path.exists(host_test_files_path): | 120 if os.path.exists(host_test_files_path): |
120 self.adb.PushIfNeeded(host_test_files_path, '%s/%s/%s' % ( | 121 self.adb.PushIfNeeded(host_test_files_path, '%s/%s/%s' % ( |
121 self.adb.GetExternalStorage(), TestRunner._DEVICE_DATA_DIR, | 122 self.adb.GetExternalStorage(), TestRunner._DEVICE_DATA_DIR, |
122 dst_layer)) | 123 dst_layer)) |
123 self.tool.CopyFiles() | 124 self.tool.CopyFiles() |
124 TestRunner._DEVICE_HAS_TEST_FILES[self.device] = True | 125 TestRunner._DEVICE_HAS_TEST_FILES[self.device] = True |
125 | 126 |
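
Each entry in options.test_data is a 'dest:host' pair: split(':', 1) keeps everything after the first colon as the host-relative source path, and the destination layer is appended to the on-device data directory. A small sketch of that mapping, with placeholder roots standing in for constants.DIR_SOURCE_ROOT, GetExternalStorage() and TestRunner._DEVICE_DATA_DIR:

# Sketch, not part of the change above: the dest:host mapping. The root
# paths used in the assert are hypothetical placeholders.
def MapTestDataPair(dest_host_pair, source_root, external_storage,
                    device_data_dir):
  """Returns the (host, device) paths for one 'dest:host' test data pair."""
  dst_layer, host_src = dest_host_pair.split(':', 1)
  host_path = '%s/%s' % (source_root, host_src)
  device_path = '%s/%s/%s' % (external_storage, device_data_dir, dst_layer)
  return host_path, device_path


assert MapTestDataPair('content:content/test/data', '/src', '/sdcard',
                       'chrome/test/data') == (
    '/src/content/test/data', '/sdcard/chrome/test/data/content')
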
(...skipping 19 matching lines...) |
145 if not self.adb.IsRootEnabled(): | 146 if not self.adb.IsRootEnabled(): |
146 logging.warning('Unable to enable java asserts for %s, non rooted device', | 147 logging.warning('Unable to enable java asserts for %s, non rooted device', |
147 self.device) | 148 self.device) |
148 else: | 149 else: |
149 if self.adb.SetJavaAssertsEnabled(True): | 150 if self.adb.SetJavaAssertsEnabled(True): |
150 self.adb.Reboot(full_reboot=False) | 151 self.adb.Reboot(full_reboot=False) |
151 | 152 |
152 # We use a different default HTTP server port for each shard index because | 153 # We use a different default HTTP server port for each shard index because |
153 # there may be a race condition when multiple processes try to launch | 154 # there may be a race condition when multiple processes try to launch |
154 # lighttpd with the same port at the same time. | 155 # lighttpd with the same port at the same time. |
155 http_server_ports = self.LaunchTestHttpServer( | 156 self.LaunchTestHttpServer( |
156 os.path.join(constants.DIR_SOURCE_ROOT), self._lighttp_port) | 157 os.path.join(constants.DIR_SOURCE_ROOT), self._lighttp_port) |
157 if self.flags: | 158 if self.flags: |
158 self.flags.AddFlags(['--disable-fre', '--enable-test-intents']) | 159 self.flags.AddFlags(['--disable-fre', '--enable-test-intents']) |
159 | 160 |
160 def TearDown(self): | 161 def TearDown(self): |
161 """Cleans up the test harness and saves outstanding data from test run.""" | 162 """Cleans up the test harness and saves outstanding data from test run.""" |
162 if self.flags: | 163 if self.flags: |
163 self.flags.Restore() | 164 self.flags.Restore() |
164 super(TestRunner, self).TearDown() | 165 super(TestRunner, self).TearDown() |
165 | 166 |
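
As the comment in SetUp notes, every shard launches lighttpd on its own port so concurrent runners never race for the same one. A trivial sketch, with an illustrative base port standing in for constants.LIGHTTPD_RANDOM_PORT_FIRST:

# Sketch, not part of the change above: 8001 is an illustrative stand-in for
# constants.LIGHTTPD_RANDOM_PORT_FIRST.
LIGHTTPD_RANDOM_PORT_FIRST = 8001


def LighttpdPortForShard(shard_index):
  """Returns the lighttpd port reserved for the given shard."""
  return LIGHTTPD_RANDOM_PORT_FIRST + shard_index


assert LighttpdPortForShard(0) == 8001
assert LighttpdPortForShard(3) == 8004
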
(...skipping 159 matching lines...) |
325 self._GetInstrumentationArgs(), timeout) | 326 self._GetInstrumentationArgs(), timeout) |
326 except android_commands.errors.WaitForResponseTimedOutError: | 327 except android_commands.errors.WaitForResponseTimedOutError: |
327 logging.info('Ran the test with timeout of %ds.' % timeout) | 328 logging.info('Ran the test with timeout of %ds.' % timeout) |
328 raise | 329 raise |
329 | 330 |
330 #override | 331 #override |
331 def RunTest(self, test): | 332 def RunTest(self, test): |
332 raw_result = None | 333 raw_result = None |
333 start_date_ms = None | 334 start_date_ms = None |
334 results = base_test_result.TestRunResults() | 335 results = base_test_result.TestRunResults() |
335 timeout=(self._GetIndividualTestTimeoutSecs(test) * | 336 timeout = (self._GetIndividualTestTimeoutSecs(test) * |
336 self._GetIndividualTestTimeoutScale(test) * | 337 self._GetIndividualTestTimeoutScale(test) * |
337 self.tool.GetTimeoutScale()) | 338 self.tool.GetTimeoutScale()) |
338 try: | 339 try: |
339 self.TestSetup(test) | 340 self.TestSetup(test) |
340 start_date_ms = int(time.time()) * 1000 | 341 start_date_ms = int(time.time()) * 1000 |
341 raw_result = self._RunTest(test, timeout) | 342 raw_result = self._RunTest(test, timeout) |
342 duration_ms = int(time.time()) * 1000 - start_date_ms | 343 duration_ms = int(time.time()) * 1000 - start_date_ms |
343 status_code = raw_result.GetStatusCode() | 344 status_code = raw_result.GetStatusCode() |
344 if status_code: | 345 if status_code: |
345 if self.options.screenshot_failures: | 346 if self.options.screenshot_failures: |
346 self._TakeScreenshot(test) | 347 self._TakeScreenshot(test) |
347 log = raw_result.GetFailureReason() | 348 log = raw_result.GetFailureReason() |
(...skipping 22 matching lines...) |
370 duration_ms = 0 | 371 duration_ms = 0 |
371 message = str(e) | 372 message = str(e) |
372 if not message: | 373 if not message: |
373 message = 'No information.' | 374 message = 'No information.' |
374 results.AddResult(test_result.InstrumentationTestResult( | 375 results.AddResult(test_result.InstrumentationTestResult( |
375 test, base_test_result.ResultType.CRASH, start_date_ms, duration_ms, | 376 test, base_test_result.ResultType.CRASH, start_date_ms, duration_ms, |
376 log=message)) | 377 log=message)) |
377 raw_result = None | 378 raw_result = None |
378 self.TestTeardown(test, raw_result) | 379 self.TestTeardown(test, raw_result) |
379 return (results, None if results.DidRunPass() else test) | 380 return (results, None if results.DidRunPass() else test) |
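
The timeout used by RunTest is the product of three factors: the per-test base timeout, the test's own timeout scale, and the tool's timeout scale. A short sketch with made-up numbers:

# Sketch, not part of the change above: the example numbers are made up.
def EffectiveTimeoutSecs(individual_timeout_secs, test_scale, tool_scale):
  """Returns the effective per-test timeout passed to _RunTest."""
  return individual_timeout_secs * test_scale * tool_scale


# A test with a 60 second base timeout, a per-test scale of 3 and a tool
# scale of 1 would be allowed 180 seconds.
assert EffectiveTimeoutSecs(60, 3, 1) == 180
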