| OLD | NEW |
| 1 # Copyright (c) 2012 The Chromium Authors. All rights reserved. | 1 # Copyright (c) 2012 The Chromium Authors. All rights reserved. |
| 2 # Use of this source code is governed by a BSD-style license that can be | 2 # Use of this source code is governed by a BSD-style license that can be |
| 3 # found in the LICENSE file. | 3 # found in the LICENSE file. |
| 4 | 4 |
| 5 """Runs the Java tests. See more information on run_instrumentation_tests.py.""" | 5 """Runs the Java tests. See more information on run_instrumentation_tests.py.""" |
| 6 | 6 |
| 7 import logging | 7 import logging |
| 8 import os | 8 import os |
| 9 import re | 9 import re |
| 10 import shutil | 10 import shutil |
| (...skipping 30 matching lines...) |
| 41 test_files += [ | 41 test_files += [ |
| 42 'net/data/ssl/certificates/', | 42 'net/data/ssl/certificates/', |
| 43 ] | 43 ] |
| 44 return test_files | 44 return test_files |
| 45 | 45 |
| 46 | 46 |
| 47 class TestRunner(base_test_runner.BaseTestRunner): | 47 class TestRunner(base_test_runner.BaseTestRunner): |
| 48 """Responsible for running a series of tests connected to a single device.""" | 48 """Responsible for running a series of tests connected to a single device.""" |
| 49 | 49 |
| 50 _DEVICE_DATA_DIR = 'chrome/test/data' | 50 _DEVICE_DATA_DIR = 'chrome/test/data' |
| 51 _EMMA_JAR = os.path.join(os.environ.get('ANDROID_BUILD_TOP', ''), | |
| 52 'external/emma/lib/emma.jar') | |
| 53 _COVERAGE_MERGED_FILENAME = 'unittest_coverage.es' | |
| 54 _COVERAGE_WEB_ROOT_DIR = os.environ.get('EMMA_WEB_ROOTDIR') | |
| 55 _COVERAGE_FILENAME = 'coverage.ec' | |
| 56 _COVERAGE_RESULT_PATH = ('/data/data/com.google.android.apps.chrome/files/' + | |
| 57 _COVERAGE_FILENAME) | |
| 58 _COVERAGE_META_INFO_PATH = os.path.join(os.environ.get('ANDROID_BUILD_TOP', | |
| 59 ''), | |
| 60 'out/target/common/obj/APPS', | |
| 61 'Chrome_intermediates/coverage.em') | |
| 62 _HOSTMACHINE_PERF_OUTPUT_FILE = '/tmp/chrome-profile' | 51 _HOSTMACHINE_PERF_OUTPUT_FILE = '/tmp/chrome-profile' |
| 63 _DEVICE_PERF_OUTPUT_SEARCH_PREFIX = (constants.DEVICE_PERF_OUTPUT_DIR + | 52 _DEVICE_PERF_OUTPUT_SEARCH_PREFIX = (constants.DEVICE_PERF_OUTPUT_DIR + |
| 64 '/chrome-profile*') | 53 '/chrome-profile*') |
| 65 _DEVICE_HAS_TEST_FILES = {} | 54 _DEVICE_HAS_TEST_FILES = {} |
| 66 | 55 |
| 67 def __init__(self, options, device, shard_index, coverage, test_pkg, | 56 def __init__(self, options, device, shard_index, test_pkg, |
| 68 ports_to_forward, is_uiautomator_test=False): | 57 ports_to_forward, is_uiautomator_test=False): |
| 69 """Create a new TestRunner. | 58 """Create a new TestRunner. |
| 70 | 59 |
| 71 Args: | 60 Args: |
| 72 options: An options object with the following required attributes: | 61 options: An options object with the following required attributes: |
| 73 - build_type: 'Release' or 'Debug'. | 62 - build_type: 'Release' or 'Debug'. |
| 74 - install_apk: Re-installs the apk if opted. | 63 - install_apk: Re-installs the apk if opted. |
| 75 - save_perf_json: Whether or not to save the JSON file from UI perf | 64 - save_perf_json: Whether or not to save the JSON file from UI perf |
| 76 tests. | 65 tests. |
| 77 - screenshot_failures: Take a screenshot for a test failure | 66 - screenshot_failures: Take a screenshot for a test failure |
| 78 - tool: Name of the Valgrind tool. | 67 - tool: Name of the Valgrind tool. |
| 79 - wait_for_debugger: blocks until the debugger is connected. | 68 - wait_for_debugger: blocks until the debugger is connected. |
| 80 - disable_assertions: Whether to disable java assertions on the device. | 69 - disable_assertions: Whether to disable java assertions on the device. |
| 81 device: Attached android device. | 70 device: Attached android device. |
| 82 shard_index: Shard index. | 71 shard_index: Shard index. |
| 83 coverage: Collects coverage information if opted. | |
| 84 test_pkg: A TestPackage object. | 72 test_pkg: A TestPackage object. |
| 85 ports_to_forward: A list of port numbers for which to set up forwarders. | 73 ports_to_forward: A list of port numbers for which to set up forwarders. |
| 86 Can be optionally requested by a test case. | 74 Can be optionally requested by a test case. |
| 87 is_uiautomator_test: Whether this is a uiautomator test. | 75 is_uiautomator_test: Whether this is a uiautomator test. |
| 88 Raises: | |
| 89 Exception: if coverage metadata is not available. | |
| 90 """ | 76 """ |
| 91 super(TestRunner, self).__init__(device, options.tool, options.build_type) | 77 super(TestRunner, self).__init__(device, options.tool, options.build_type) |
| 92 self._lighttp_port = constants.LIGHTTPD_RANDOM_PORT_FIRST + shard_index | 78 self._lighttp_port = constants.LIGHTTPD_RANDOM_PORT_FIRST + shard_index |
| 93 | 79 |
| 94 self.build_type = options.build_type | 80 self.build_type = options.build_type |
| 95 self.test_data = options.test_data | 81 self.test_data = options.test_data |
| 96 self.save_perf_json = options.save_perf_json | 82 self.save_perf_json = options.save_perf_json |
| 97 self.screenshot_failures = options.screenshot_failures | 83 self.screenshot_failures = options.screenshot_failures |
| 98 self.wait_for_debugger = options.wait_for_debugger | 84 self.wait_for_debugger = options.wait_for_debugger |
| 99 self.disable_assertions = options.disable_assertions | 85 self.disable_assertions = options.disable_assertions |
| 100 self.coverage = coverage | |
| 101 self.test_pkg = test_pkg | 86 self.test_pkg = test_pkg |
| 102 self.ports_to_forward = ports_to_forward | 87 self.ports_to_forward = ports_to_forward |
| 103 self.is_uiautomator_test = is_uiautomator_test | 88 self.is_uiautomator_test = is_uiautomator_test |
| 104 if self.is_uiautomator_test: | 89 if self.is_uiautomator_test: |
| 105 self.package_name = options.package_name | 90 self.package_name = options.package_name |
| 106 else: | 91 else: |
| 107 self.install_apk = options.install_apk | 92 self.install_apk = options.install_apk |
| 108 | 93 |
| 109 self.forwarder = None | 94 self.forwarder = None |
| 110 | 95 |
| 111 if self.coverage: | |
| 112 if os.path.exists(TestRunner._COVERAGE_MERGED_FILENAME): | |
| 113 os.remove(TestRunner._COVERAGE_MERGED_FILENAME) | |
| 114 if not os.path.exists(TestRunner._COVERAGE_META_INFO_PATH): | |
| 115 raise Exception('FATAL ERROR in ' + sys.argv[0] + | |
| 116 ' : Coverage meta info [' + | |
| 117 TestRunner._COVERAGE_META_INFO_PATH + | |
| 118 '] does not exist.') | |
| 119 if (not TestRunner._COVERAGE_WEB_ROOT_DIR or | |
| 120 not os.path.exists(TestRunner._COVERAGE_WEB_ROOT_DIR)): | |
| 121 raise Exception('FATAL ERROR in ' + sys.argv[0] + | |
| 122 ' : Path specified in $EMMA_WEB_ROOTDIR [' + | |
| 123 TestRunner._COVERAGE_WEB_ROOT_DIR + | |
| 124 '] does not exist.') | |
| 125 | |
| 126 #override. | 96 #override. |
| 127 def PushDependencies(self): | 97 def PushDependencies(self): |
| 128 # TODO(frankf): Implement a general approach for copying/installing | 98 # TODO(frankf): Implement a general approach for copying/installing |
| 129 # once across test runners. | 99 # once across test runners. |
| 130 if TestRunner._DEVICE_HAS_TEST_FILES.get(self.device, False): | 100 if TestRunner._DEVICE_HAS_TEST_FILES.get(self.device, False): |
| 131 logging.warning('Already copied test files to device %s, skipping.', | 101 logging.warning('Already copied test files to device %s, skipping.', |
| 132 self.device) | 102 self.device) |
| 133 return | 103 return |
| 134 | 104 |
| 135 test_data = _GetDataFilesForTestSuite(self.test_pkg.GetApkName()) | 105 test_data = _GetDataFilesForTestSuite(self.test_pkg.GetApkName()) |
| (...skipping 12 matching lines...) |
| 148 host_test_files_path = constants.CHROME_DIR + '/' + host_src | 118 host_test_files_path = constants.CHROME_DIR + '/' + host_src |
| 149 if os.path.exists(host_test_files_path): | 119 if os.path.exists(host_test_files_path): |
| 150 self.adb.PushIfNeeded(host_test_files_path, | 120 self.adb.PushIfNeeded(host_test_files_path, |
| 151 self.adb.GetExternalStorage() + '/' + | 121 self.adb.GetExternalStorage() + '/' + |
| 152 TestRunner._DEVICE_DATA_DIR + '/' + dst_layer) | 122 TestRunner._DEVICE_DATA_DIR + '/' + dst_layer) |
| 153 if self.is_uiautomator_test or self.install_apk: | 123 if self.is_uiautomator_test or self.install_apk: |
| 154 self.test_pkg.Install(self.adb) | 124 self.test_pkg.Install(self.adb) |
| 155 self.tool.CopyFiles() | 125 self.tool.CopyFiles() |
| 156 TestRunner._DEVICE_HAS_TEST_FILES[self.device] = True | 126 TestRunner._DEVICE_HAS_TEST_FILES[self.device] = True |
| 157 | 127 |
| 158 def SaveCoverageData(self, test): | |
| 159 """Saves the Emma coverage data before it's overwritten by the next test. | |
| 160 | |
| 161 Args: | |
| 162 test: the test whose coverage data is collected. | |
| 163 """ | |
| 164 if not self.coverage: | |
| 165 return | |
| 166 if not self.adb.Adb().Pull(TestRunner._COVERAGE_RESULT_PATH, | |
| 167 constants.CHROME_DIR): | |
| 168 logging.error('ERROR: Unable to find file ' + | |
| 169 TestRunner._COVERAGE_RESULT_PATH + | |
| 170 ' on the device for test ' + test) | |
| 171 pulled_coverage_file = os.path.join(constants.CHROME_DIR, | |
| 172 TestRunner._COVERAGE_FILENAME) | |
| 173 if os.path.exists(TestRunner._COVERAGE_MERGED_FILENAME): | |
| 174 cmd = ['java', '-classpath', TestRunner._EMMA_JAR, 'emma', 'merge', | |
| 175 '-in', pulled_coverage_file, | |
| 176 '-in', TestRunner._COVERAGE_MERGED_FILENAME, | |
| 177 '-out', TestRunner._COVERAGE_MERGED_FILENAME] | |
| 178 cmd_helper.RunCmd(cmd) | |
| 179 else: | |
| 180 shutil.copy(pulled_coverage_file, | |
| 181 TestRunner._COVERAGE_MERGED_FILENAME) | |
| 182 os.remove(pulled_coverage_file) | |
| 183 | |
| 184 def GenerateCoverageReportIfNeeded(self): | |
| 185 """Uses the Emma to generate a coverage report and a html page.""" | |
| 186 if not self.coverage: | |
| 187 return | |
| 188 cmd = ['java', '-classpath', TestRunner._EMMA_JAR, | |
| 189 'emma', 'report', '-r', 'html', | |
| 190 '-in', TestRunner._COVERAGE_MERGED_FILENAME, | |
| 191 '-in', TestRunner._COVERAGE_META_INFO_PATH] | |
| 192 cmd_helper.RunCmd(cmd) | |
| 193 new_dir = os.path.join(TestRunner._COVERAGE_WEB_ROOT_DIR, | |
| 194 time.strftime('Coverage_for_%Y_%m_%d_%a_%H:%M')) | |
| 195 shutil.copytree('coverage', new_dir) | |
| 196 | |
| 197 latest_dir = os.path.join(TestRunner._COVERAGE_WEB_ROOT_DIR, | |
| 198 'Latest_Coverage_Run') | |
| 199 if os.path.exists(latest_dir): | |
| 200 shutil.rmtree(latest_dir) | |
| 201 os.mkdir(latest_dir) | |
| 202 webserver_new_index = os.path.join(new_dir, 'index.html') | |
| 203 webserver_new_files = os.path.join(new_dir, '_files') | |
| 204 webserver_latest_index = os.path.join(latest_dir, 'index.html') | |
| 205 webserver_latest_files = os.path.join(latest_dir, '_files') | |
| 206 # Setup new softlinks to last result. | |
| 207 os.symlink(webserver_new_index, webserver_latest_index) | |
| 208 os.symlink(webserver_new_files, webserver_latest_files) | |
| 209 cmd_helper.RunCmd(['chmod', '755', '-R', latest_dir, new_dir]) | |
| 210 | |
| 211 def _GetInstrumentationArgs(self): | 128 def _GetInstrumentationArgs(self): |
| 212 ret = {} | 129 ret = {} |
| 213 if self.coverage: | |
| 214 ret['coverage'] = 'true' | |
| 215 if self.wait_for_debugger: | 130 if self.wait_for_debugger: |
| 216 ret['debug'] = 'true' | 131 ret['debug'] = 'true' |
| 217 return ret | 132 return ret |
| 218 | 133 |
| 219 def _TakeScreenshot(self, test): | 134 def _TakeScreenshot(self, test): |
| 220 """Takes a screenshot from the device.""" | 135 """Takes a screenshot from the device.""" |
| 221 screenshot_name = os.path.join(constants.SCREENSHOTS_DIR, test + '.png') | 136 screenshot_name = os.path.join(constants.SCREENSHOTS_DIR, test + '.png') |
| 222 logging.info('Taking screenshot named %s', screenshot_name) | 137 logging.info('Taking screenshot named %s', screenshot_name) |
| 223 self.adb.TakeScreenshot(screenshot_name) | 138 self.adb.TakeScreenshot(screenshot_name) |
| 224 | 139 |
| (...skipping 18 matching lines...) |
| 243 # forwarder will stomp on them otherwise. | 158 # forwarder will stomp on them otherwise. |
| 244 port_pairs.append(http_server_ports) | 159 port_pairs.append(http_server_ports) |
| 245 self.forwarder = forwarder.Forwarder(self.adb, self.build_type) | 160 self.forwarder = forwarder.Forwarder(self.adb, self.build_type) |
| 246 self.forwarder.Run(port_pairs, self.tool, '127.0.0.1') | 161 self.forwarder.Run(port_pairs, self.tool, '127.0.0.1') |
| 247 self.flags.AddFlags(['--enable-test-intents']) | 162 self.flags.AddFlags(['--enable-test-intents']) |
| 248 | 163 |
| 249 def TearDown(self): | 164 def TearDown(self): |
| 250 """Cleans up the test harness and saves outstanding data from test run.""" | 165 """Cleans up the test harness and saves outstanding data from test run.""" |
| 251 if self.forwarder: | 166 if self.forwarder: |
| 252 self.forwarder.Close() | 167 self.forwarder.Close() |
| 253 self.GenerateCoverageReportIfNeeded() | |
| 254 super(TestRunner, self).TearDown() | 168 super(TestRunner, self).TearDown() |
| 255 | 169 |
| 256 def TestSetup(self, test): | 170 def TestSetup(self, test): |
| 257 """Sets up the test harness for running a particular test. | 171 """Sets up the test harness for running a particular test. |
| 258 | 172 |
| 259 Args: | 173 Args: |
| 260 test: The name of the test that will be run. | 174 test: The name of the test that will be run. |
| 261 """ | 175 """ |
| 262 self.SetupPerfMonitoringIfNeeded(test) | 176 self.SetupPerfMonitoringIfNeeded(test) |
| 263 self._SetupIndividualTestTimeoutScale(test) | 177 self._SetupIndividualTestTimeoutScale(test) |
| (...skipping 21 matching lines...) |
| 285 """ | 199 """ |
| 286 if not self._IsPerfTest(test): | 200 if not self._IsPerfTest(test): |
| 287 return | 201 return |
| 288 self.adb.Adb().SendCommand('shell rm ' + | 202 self.adb.Adb().SendCommand('shell rm ' + |
| 289 TestRunner._DEVICE_PERF_OUTPUT_SEARCH_PREFIX) | 203 TestRunner._DEVICE_PERF_OUTPUT_SEARCH_PREFIX) |
| 290 self.adb.StartMonitoringLogcat() | 204 self.adb.StartMonitoringLogcat() |
| 291 | 205 |
| 292 def TestTeardown(self, test, raw_result): | 206 def TestTeardown(self, test, raw_result): |
| 293 """Cleans up the test harness after running a particular test. | 207 """Cleans up the test harness after running a particular test. |
| 294 | 208 |
| 295 Depending on the options of this TestRunner this might handle coverage | 209 Depending on the options of this TestRunner this might handle performance |
| 296 tracking or performance tracking. This method will only be called if the | 210 tracking. This method will only be called if the test passed. |
| 297 test passed. | |
| 298 | 211 |
| 299 Args: | 212 Args: |
| 300 test: The name of the test that was just run. | 213 test: The name of the test that was just run. |
| 301 raw_result: result for this test. | 214 raw_result: result for this test. |
| 302 """ | 215 """ |
| 303 | 216 |
| 304 self.tool.CleanUpEnvironment() | 217 self.tool.CleanUpEnvironment() |
| 305 | 218 |
| 306 # The logic below relies on the test passing. | 219 # The logic below relies on the test passing. |
| 307 if not raw_result or raw_result.GetStatusCode(): | 220 if not raw_result or raw_result.GetStatusCode(): |
| 308 return | 221 return |
| 309 | 222 |
| 310 self.TearDownPerfMonitoring(test) | 223 self.TearDownPerfMonitoring(test) |
| 311 self.SaveCoverageData(test) | |
| 312 | 224 |
| 313 def TearDownPerfMonitoring(self, test): | 225 def TearDownPerfMonitoring(self, test): |
| 314 """Cleans up performance monitoring if the specified test required it. | 226 """Cleans up performance monitoring if the specified test required it. |
| 315 | 227 |
| 316 Args: | 228 Args: |
| 317 test: The name of the test that was just run. | 229 test: The name of the test that was just run. |
| 318 Raises: | 230 Raises: |
| 319 Exception: if there's anything wrong with the perf data. | 231 Exception: if there's anything wrong with the perf data. |
| 320 """ | 232 """ |
| 321 if not self._IsPerfTest(test): | 233 if not self._IsPerfTest(test): |
| (...skipping 140 matching lines...) |
| 462 duration_ms = 0 | 374 duration_ms = 0 |
| 463 message = str(e) | 375 message = str(e) |
| 464 if not message: | 376 if not message: |
| 465 message = 'No information.' | 377 message = 'No information.' |
| 466 results.AddResult(test_result.InstrumentationTestResult( | 378 results.AddResult(test_result.InstrumentationTestResult( |
| 467 test, base_test_result.ResultType.CRASH, start_date_ms, duration_ms, | 379 test, base_test_result.ResultType.CRASH, start_date_ms, duration_ms, |
| 468 log=message)) | 380 log=message)) |
| 469 raw_result = None | 381 raw_result = None |
| 470 self.TestTeardown(test, raw_result) | 382 self.TestTeardown(test, raw_result) |
| 471 return (results, None if results.DidRunPass() else test) | 383 return (results, None if results.DidRunPass() else test) |
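
Reviewer note: since the `coverage` parameter is dropped from `TestRunner.__init__` (see the signature change above), any call site that constructs the runner needs its argument list updated as well. A minimal sketch of the affected call, assuming `options`, `device`, `shard_index`, `test_pkg`, and `ports_to_forward` are prepared by the surrounding dispatcher; these names are illustrative and not taken from this CL:

```python
# Hypothetical call site; the surrounding variables are assumed, not shown in this CL.

# Before this change, coverage was passed explicitly:
# runner = TestRunner(options, device, shard_index, coverage,
#                     test_pkg, ports_to_forward, is_uiautomator_test=False)

# After this change, the coverage argument (and all Emma handling) is gone:
runner = TestRunner(options, device, shard_index,
                    test_pkg, ports_to_forward, is_uiautomator_test=False)
```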