Chromium Code Reviews| OLD | NEW |
|---|---|
| 1 # Copyright (c) 2012 The Chromium Authors. All rights reserved. | 1 # Copyright (c) 2012 The Chromium Authors. All rights reserved. |
| 2 # Use of this source code is governed by a BSD-style license that can be | 2 # Use of this source code is governed by a BSD-style license that can be |
| 3 # found in the LICENSE file. | 3 # found in the LICENSE file. |
| 4 | 4 |
| 5 """Runs the Java tests. See more information on run_instrumentation_tests.py.""" | 5 """Runs the Java tests. See more information on run_instrumentation_tests.py.""" |
| 6 | 6 |
| 7 import fnmatch | 7 import fnmatch |
| 8 import logging | 8 import logging |
| 9 import os | 9 import os |
| 10 import re | 10 import re |
| (...skipping 22 matching lines...) Expand all Loading... | |
| 33 """A fatal test exception.""" | 33 """A fatal test exception.""" |
| 34 pass | 34 pass |
| 35 | 35 |
| 36 | 36 |
| 37 def _TestNameToExpectation(test_name): | 37 def _TestNameToExpectation(test_name): |
| 38 # A test name is a Package.Path.Class#testName; convert to what we use in | 38 # A test name is a Package.Path.Class#testName; convert to what we use in |
| 39 # the expectation file. | 39 # the expectation file. |
| 40 return '.'.join(test_name.replace('#', '.').split('.')[-2:]) | 40 return '.'.join(test_name.replace('#', '.').split('.')[-2:]) |
| 41 | 41 |
| 42 | 42 |
| 43 # TODO(jaydeepmehta): FilterTests should be moved to a common file for | |
|
bulach
2012/10/15 19:31:55
ditto..
felipeg
2012/10/16 14:11:44
Done.
| |
| 44 # all integration tests. | |
| 43 def FilterTests(test_names, pattern_list, inclusive): | 45 def FilterTests(test_names, pattern_list, inclusive): |
| 44 """Filters |test_names| using a list of patterns. | 46 """Filters |test_names| using a list of patterns. |
| 45 | 47 |
| 46 Args: | 48 Args: |
| 47 test_names: A list of test names. | 49 test_names: A list of test names. |
| 48 pattern_list: A list of patterns. | 50 pattern_list: A list of patterns. |
| 49 inclusive: If True, returns the tests that match any pattern. If False, | 51 inclusive: If True, returns the tests that match any pattern. If False, |
| 50 returns the tests that do not match any pattern. | 52 returns the tests that do not match any pattern. |
| 51 Returns: | 53 Returns: |
| 52 A list of test names. | 54 A list of test names. |
| (...skipping 70 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... | |
| 123 self.wait_for_debugger = options.wait_for_debugger | 125 self.wait_for_debugger = options.wait_for_debugger |
| 124 | 126 |
| 125 self.tests_iter = tests_iter | 127 self.tests_iter = tests_iter |
| 126 self.coverage = coverage | 128 self.coverage = coverage |
| 127 self.apks = apks | 129 self.apks = apks |
| 128 self.test_apk = apks[0] | 130 self.test_apk = apks[0] |
| 129 self.instrumentation_class_path = self.test_apk.GetPackageName() | 131 self.instrumentation_class_path = self.test_apk.GetPackageName() |
| 130 self.ports_to_forward = ports_to_forward | 132 self.ports_to_forward = ports_to_forward |
| 131 | 133 |
| 132 self.test_results = TestResults() | 134 self.test_results = TestResults() |
| 133 # List of forwarders created by this instance of TestRunner. | 135 self.forwarder = None |
| 134 self.forwarders = [] | |
| 135 | 136 |
| 136 if self.coverage: | 137 if self.coverage: |
| 137 if os.path.exists(TestRunner._COVERAGE_MERGED_FILENAME): | 138 if os.path.exists(TestRunner._COVERAGE_MERGED_FILENAME): |
| 138 os.remove(TestRunner._COVERAGE_MERGED_FILENAME) | 139 os.remove(TestRunner._COVERAGE_MERGED_FILENAME) |
| 139 if not os.path.exists(TestRunner._COVERAGE_META_INFO_PATH): | 140 if not os.path.exists(TestRunner._COVERAGE_META_INFO_PATH): |
| 140 raise FatalTestException('FATAL ERROR in ' + sys.argv[0] + | 141 raise FatalTestException('FATAL ERROR in ' + sys.argv[0] + |
| 141 ' : Coverage meta info [' + | 142 ' : Coverage meta info [' + |
| 142 TestRunner._COVERAGE_META_INFO_PATH + | 143 TestRunner._COVERAGE_META_INFO_PATH + |
| 143 '] does not exist.') | 144 '] does not exist.') |
| 144 if (not TestRunner._COVERAGE_WEB_ROOT_DIR or | 145 if (not TestRunner._COVERAGE_WEB_ROOT_DIR or |
| (...skipping 118 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... | |
| 263 if not self.adb.IsRootEnabled(): | 264 if not self.adb.IsRootEnabled(): |
| 264 logging.warning('Unable to enable java asserts for %s, non rooted device', | 265 logging.warning('Unable to enable java asserts for %s, non rooted device', |
| 265 self.device) | 266 self.device) |
| 266 else: | 267 else: |
| 267 if self.adb.SetJavaAssertsEnabled(enable=True): | 268 if self.adb.SetJavaAssertsEnabled(enable=True): |
| 268 self.adb.Reboot(full_reboot=False) | 269 self.adb.Reboot(full_reboot=False) |
| 269 | 270 |
| 270 # We give a different default value to launch the HTTP server based on shard index | 271 # We give a different default value to launch the HTTP server based on shard index |
| 271 # because it may have race condition when multiple processes are trying to | 272 # because it may have race condition when multiple processes are trying to |
| 272 # launch lighttpd with same port at same time. | 273 # launch lighttpd with same port at same time. |
| 273 # This line *must* come before the forwarding below, as it nukes all | 274 http_server_ports = self.LaunchTestHttpServer( |
| 274 # the other forwarders. A more comprehensive fix might be to pull the | 275 os.path.join(constants.CHROME_DIR), |
| 275 # forwarder-killing line up to here, but that might violate assumptions | 276 (constants.LIGHTTPD_RANDOM_PORT_FIRST + self.shard_index)) |
| 276 # implicit in other places. | |
| 277 self.LaunchTestHttpServer(os.path.join(constants.CHROME_DIR), | |
| 278 (constants.LIGHTTPD_RANDOM_PORT_FIRST + | |
| 279 self.shard_index)) | |
| 280 | |
| 281 if self.ports_to_forward: | 277 if self.ports_to_forward: |
| 282 for port in self.ports_to_forward: | 278 port_pairs = [(port, port) for port in self.ports_to_forward] |
| 283 self.forwarders.append(Forwarder( | 279 # We need to remember which ports the HTTP server is using, since the |
| 284 self.adb, [(port, port)], self.tool, '127.0.0.1', self.build_type)) | 280 # forwarder will stomp on them otherwise. |
| 281 port_pairs.append(http_server_ports) | |
| 282 self.forwarder = Forwarder( | |
| 283 self.adb, port_pairs, self.tool, '127.0.0.1', self.build_type) | |
| 285 self.CopyTestFilesOnce() | 284 self.CopyTestFilesOnce() |
| 286 self.flags.AddFlags(['--enable-test-intents']) | 285 self.flags.AddFlags(['--enable-test-intents']) |
| 287 | 286 |
| 288 def TearDown(self): | 287 def TearDown(self): |
| 289 """Cleans up the test harness and saves outstanding data from test run.""" | 288 """Cleans up the test harness and saves outstanding data from test run.""" |
| 290 if self.forwarders: | 289 if self.forwarder: |
| 291 for forwarder in self.forwarders: | 290 self.forwarder.Close() |
| 292 forwarder.Close() | |
| 293 self.GenerateCoverageReportIfNeeded() | 291 self.GenerateCoverageReportIfNeeded() |
| 294 super(TestRunner, self).TearDown() | 292 super(TestRunner, self).TearDown() |
| 295 | 293 |
| 296 def TestSetup(self, test): | 294 def TestSetup(self, test): |
| 297 """Sets up the test harness for running a particular test. | 295 """Sets up the test harness for running a particular test. |
| 298 | 296 |
| 299 Args: | 297 Args: |
| 300 test: The name of the test that will be run. | 298 test: The name of the test that will be run. |
| 301 """ | 299 """ |
| 302 self.SetupPerfMonitoringIfNeeded(test) | 300 self.SetupPerfMonitoringIfNeeded(test) |
| (...skipping 53 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... | |
| 356 Args: | 354 Args: |
| 357 test: The name of the test that was just run. | 355 test: The name of the test that was just run. |
| 358 Raises: | 356 Raises: |
| 359 FatalTestException: if there's anything wrong with the perf data. | 357 FatalTestException: if there's anything wrong with the perf data. |
| 360 """ | 358 """ |
| 361 if not self._IsPerfTest(test): | 359 if not self._IsPerfTest(test): |
| 362 return | 360 return |
| 363 raw_test_name = test.split('#')[1] | 361 raw_test_name = test.split('#')[1] |
| 364 | 362 |
| 365 # Wait and grab annotation data so we can figure out which traces to parse | 363 # Wait and grab annotation data so we can figure out which traces to parse |
| 364 # TODO(tonyg): Is there an error log line to watch for here? | |
|
bulach
2012/10/15 19:31:55
ditto..
felipeg
2012/10/16 14:11:44
Done.
| |
| 366 regex = self.adb.WaitForLogMatch(re.compile('\*\*PERFANNOTATION\(' + | 365 regex = self.adb.WaitForLogMatch(re.compile('\*\*PERFANNOTATION\(' + |
| 367 raw_test_name + | 366 raw_test_name + |
| 368 '\)\:(.*)'), None) | 367 '\)\:(.*)'), None) |
| 369 | 368 |
| 370 # If the test is set to run on a specific device type only (i.e. only | 369 # If the test is set to run on a specific device type only (i.e. only |
| 371 # tablet or phone) and it is being run on the wrong device, the test | 370 # tablet or phone) and it is being run on the wrong device, the test |
| 372 # just quits and does not do anything. The java test harness will still | 371 # just quits and does not do anything. The java test harness will still |
| 373 # print the appropriate annotation for us, but will add --NORUN-- for | 372 # print the appropriate annotation for us, but will add --NORUN-- for |
| 374 # us so we know to ignore the results. | 373 # us so we know to ignore the results. |
| 375 # The --NORUN-- tag is managed by MainActivityTestBase.java | 374 # The --NORUN-- tag is managed by MainActivityTestBase.java |
| (...skipping 211 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... | |
| 587 | 586 |
| 588 logging.info('Will run: %s', str(tests)) | 587 logging.info('Will run: %s', str(tests)) |
| 589 | 588 |
| 590 if len(attached_devices) > 1 and (coverage or options.wait_for_debugger): | 589 if len(attached_devices) > 1 and (coverage or options.wait_for_debugger): |
| 591 logging.warning('Coverage / debugger can not be sharded, ' | 590 logging.warning('Coverage / debugger can not be sharded, ' |
| 592 'using first available device') | 591 'using first available device') |
| 593 attached_devices = attached_devices[:1] | 592 attached_devices = attached_devices[:1] |
| 594 sharder = TestSharder(attached_devices, options, tests, apks) | 593 sharder = TestSharder(attached_devices, options, tests, apks) |
| 595 test_results = sharder.RunShardedTests() | 594 test_results = sharder.RunShardedTests() |
| 596 return test_results | 595 return test_results |
| OLD | NEW |