Index: telemetry/telemetry/testing/browser_test_runner_unittest.py
diff --git a/telemetry/telemetry/testing/browser_test_runner_unittest.py b/telemetry/telemetry/testing/browser_test_runner_unittest.py
index cdfbdc300bf7f6367be54c28a9faf736b82659f4..2321e50a84e0c0148e916433631d9be8a887fa4a 100644
--- a/telemetry/telemetry/testing/browser_test_runner_unittest.py
+++ b/telemetry/telemetry/testing/browser_test_runner_unittest.py
@@ -18,7 +18,7 @@ from telemetry.testing import browser_test_runner
 class BrowserTestRunnerTest(unittest.TestCase):
 
   def baseTest(self, mockInitDependencyManager, test_filter,
-               failures, successes):
+               failures, successes, test_name='SimpleTest'):
     options = browser_test_runner.TestRunOptions()
     options.verbosity = 0
     config = project_config.ProjectConfig(
@@ -33,7 +33,7 @@ class BrowserTestRunnerTest(unittest.TestCase):
     try:
       browser_test_runner.Run(
           config, options,
-          ['SimpleTest',
+          [test_name,
           '--write-abbreviated-json-results-to=%s' % temp_file_name,
           '--test-filter=%s' % test_filter])
       mockInitDependencyManager.assert_called_with(['a', 'b', 'c'])
@@ -49,25 +49,38 @@ class BrowserTestRunnerTest(unittest.TestCase):
   def testJsonOutputFormatNegativeFilter(self, mockInitDependencyManager):
     self.baseTest(
         mockInitDependencyManager, '^(add|multiplier).*',
-        ['browser_tests.simple_numeric_test.SimpleTest.add_1_and_2',
-         'browser_tests.simple_numeric_test.SimpleTest.add_7_and_3',
-         'browser_tests.simple_numeric_test.SimpleTest.multiplier_simple_2'],
-        ['browser_tests.simple_numeric_test.SimpleTest.add_2_and_3',
-         'browser_tests.simple_numeric_test.SimpleTest.multiplier_simple',
-         'browser_tests.simple_numeric_test.SimpleTest.multiplier_simple_3'])
+        ['add_1_and_2',
+         'add_7_and_3',
+         'multiplier_simple_2'],
+        ['add_2_and_3',
+         'multiplier_simple',
+         'multiplier_simple_3'])
+
+  @mock.patch('telemetry.internal.util.binary_manager.InitDependencyManager')
+  def testJsonOutputWhenSetupClassFailed(self, mockInitDependencyManager):
+    self.baseTest(
+        mockInitDependencyManager, '.*',
+        ['setUpClass (browser_tests.failed_tests.SetUpClassFailedTest)'],
+        [], test_name='SetUpClassFailedTest')
+
+  @mock.patch('telemetry.internal.util.binary_manager.InitDependencyManager')
+  def testJsonOutputWhenTearDownClassFailed(self, mockInitDependencyManager):
+    self.baseTest(
+        mockInitDependencyManager, '.*',
+        ['tearDownClass (browser_tests.failed_tests.TearDownClassFailedTest)'],
+        sorted(['dummy_test_%i' % i for i in xrange(0, 100)]),
+        test_name='TearDownClassFailedTest')
 
   @mock.patch('telemetry.internal.util.binary_manager.InitDependencyManager')
   def testJsonOutputFormatPositiveFilter(self, mockInitDependencyManager):
     self.baseTest(
         mockInitDependencyManager, '(TestSimple|TestException).*',
-        ['browser_tests.simple_numeric_test.SimpleTest.TestException',
-         'browser_tests.simple_numeric_test.SimpleTest.TestSimple'],
-        [])
+        ['TestException', 'TestSimple'], [])
 
   @mock.patch('telemetry.internal.util.binary_manager.InitDependencyManager')
   def testExecutingTestsInSortedOrder(self, mockInitDependencyManager):
     alphabetical_tests = []
-    prefix = 'browser_tests.simple_numeric_test.SimpleTest.Alphabetical_'
+    prefix = 'Alphabetical_'
     for i in xrange(20):
       alphabetical_tests.append(prefix + str(i))
     for c in string.uppercase[:26]:
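
For context: unittest reports a class-fixture failure as a synthetic test named `setUpClass (module.Class)` or `tearDownClass (module.Class)`, which is exactly what the two expected failure strings above match. The `browser_tests.failed_tests` module is not part of this patch; below is a minimal sketch of fixture classes that would produce these results (the class layout and the dummy-test generation are assumptions inferred from the expectations):

```python
import unittest


class SetUpClassFailedTest(unittest.TestCase):
  # The raise below is reported as the single failure
  # 'setUpClass (browser_tests.failed_tests.SetUpClassFailedTest)';
  # no test method ever runs, so the successes list is empty.
  @classmethod
  def setUpClass(cls):
    raise Exception('intentional setUpClass failure')

  def dummy_test_0(self):
    pass


class TearDownClassFailedTest(unittest.TestCase):
  # Every generated test passes first; the failure is then attributed to
  # 'tearDownClass (browser_tests.failed_tests.TearDownClassFailedTest)'.
  @classmethod
  def tearDownClass(cls):
    raise Exception('intentional tearDownClass failure')


# Generate the 100 passing dummy_test_<i> methods that the expected
# successes list, sorted(['dummy_test_%i' % i ...]), counts on.
for i in xrange(0, 100):
  setattr(TearDownClassFailedTest, 'dummy_test_%i' % i, lambda self: None)
```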
@@ -81,7 +94,7 @@ class BrowserTestRunnerTest(unittest.TestCase):
   def shardingRangeTestHelper(self, total_shards, num_tests):
     shard_ranges = []
     for shard_index in xrange(0, total_shards):
-      shard_ranges.append(browser_test_runner.TestRangeForShard(
+      shard_ranges.append(browser_test_runner._TestRangeForShard(
           total_shards, shard_index, num_tests))
     # Make assertions about ranges
     num_tests_run = 0
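
The helper's assertions (contiguous, non-overlapping ranges that cover every test exactly once) pin `_TestRangeForShard` down fairly tightly. Here is a minimal sketch consistent with those assertions and with the 5/4/4 split exercised by testShardedTestRun further down; the actual implementation in browser_test_runner may differ:

```python
def _TestRangeForShard(total_shards, shard_index, num_tests):
  """Half-open [start, end) range of sorted-test indices for one shard.

  Distributes num_tests as evenly as possible: the first
  num_tests % total_shards shards each take one extra test.
  """
  tests_per_shard = num_tests / total_shards  # integer division (Python 2)
  extra = num_tests % total_shards
  start = tests_per_shard * shard_index + min(shard_index, extra)
  end = start + tests_per_shard + (1 if shard_index < extra else 0)
  return (start, end)


# 13 tests across 3 shards -> (0, 5), (5, 9), (9, 13): the 5/4/4
# grouping that testShardedTestRun asserts below.
```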
@@ -118,7 +131,10 @@ class BrowserTestRunnerTest(unittest.TestCase):
     self.shardingRangeTestHelper(1, 1)
     self.shardingRangeTestHelper(2, 1)
 
-  def baseShardingTest(self, total_shards, shard_index, failures, successes):
+  def baseShardingTest(self, total_shards, shard_index, failures, successes,
+                       opt_abbr_input_json_file=None,
+                       opt_test_filter='',
+                       opt_filter_tests_after_sharding=False):
     options = browser_test_runner.TestRunOptions()
     options.verbosity = 0
     config = project_config.ProjectConfig(
@@ -130,13 +146,22 @@ class BrowserTestRunnerTest(unittest.TestCase):
     temp_file = tempfile.NamedTemporaryFile(delete=False)
     temp_file.close()
     temp_file_name = temp_file.name
+    opt_args = []
+    if opt_abbr_input_json_file:
+      opt_args += [
+          '--read-abbreviated-json-results-from=%s' % opt_abbr_input_json_file]
+    if opt_test_filter:
+      opt_args += [
+          '--test-filter=%s' % opt_test_filter]
+    if opt_filter_tests_after_sharding:
+      opt_args += ['--filter-tests-after-sharding']
     try:
       browser_test_runner.Run(
           config, options,
           ['SimpleShardingTest',
           '--write-abbreviated-json-results-to=%s' % temp_file_name,
           '--total-shards=%d' % total_shards,
-          '--shard-index=%d' % shard_index])
+          '--shard-index=%d' % shard_index] + opt_args)
       with open(temp_file_name) as f:
         test_result = json.load(f)
         self.assertEquals(test_result['failures'], failures)
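
For illustration, with all three optional arguments supplied, baseShardingTest ends up passing an argument list like the following to browser_test_runner.Run (the path and filter values here are made up):

```python
['SimpleShardingTest',
 '--write-abbreviated-json-results-to=/tmp/tmpXYZ',
 '--total-shards=4',
 '--shard-index=1',
 '--read-abbreviated-json-results-from=/tmp/prior_results.json',
 '--test-filter=(Test1|passing_test_2|passing_test_6)',
 '--filter-tests-after-sharding']
```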
@@ -148,21 +173,117 @@ class BrowserTestRunnerTest(unittest.TestCase):
   @mock.patch('telemetry.internal.util.binary_manager.InitDependencyManager')
   def testShardedTestRun(self, _):
     self.baseShardingTest(3, 0, [], [
-        'browser_tests.simple_sharding_test.SimpleShardingTest.Test1',
-        'browser_tests.simple_sharding_test.SimpleShardingTest.Test2',
-        'browser_tests.simple_sharding_test.SimpleShardingTest.Test3',
-        'browser_tests.simple_sharding_test.SimpleShardingTest.passing_test_0',
-        'browser_tests.simple_sharding_test.SimpleShardingTest.passing_test_1',
+        'Test1',
+        'Test2',
+        'Test3',
+        'passing_test_0',
+        'passing_test_1',
     ])
     self.baseShardingTest(3, 1, [], [
-        'browser_tests.simple_sharding_test.SimpleShardingTest.passing_test_2',
-        'browser_tests.simple_sharding_test.SimpleShardingTest.passing_test_3',
-        'browser_tests.simple_sharding_test.SimpleShardingTest.passing_test_4',
-        'browser_tests.simple_sharding_test.SimpleShardingTest.passing_test_5',
+        'passing_test_2',
+        'passing_test_3',
+        'passing_test_4',
+        'passing_test_5',
     ])
     self.baseShardingTest(3, 2, [], [
-        'browser_tests.simple_sharding_test.SimpleShardingTest.passing_test_6',
-        'browser_tests.simple_sharding_test.SimpleShardingTest.passing_test_7',
-        'browser_tests.simple_sharding_test.SimpleShardingTest.passing_test_8',
-        'browser_tests.simple_sharding_test.SimpleShardingTest.passing_test_9',
+        'passing_test_6',
+        'passing_test_7',
+        'passing_test_8',
+        'passing_test_9',
     ])
+
+  def writeMockTestResultsFile(self):
+    mock_test_results = {
+        'passes': [
+            'Test1',
+            'Test2',
+            'Test3',
+            'passing_test_0',
+            'passing_test_1',
+            'passing_test_2',
+            'passing_test_3',
+            'passing_test_4',
+            'passing_test_5',
+            'passing_test_6',
+            'passing_test_7',
+            'passing_test_8',
+            'passing_test_9',
+        ],
+        'failures': [],
+        'valid': True,
+        'times': {
+            'Test1': 3.0,
+            'Test2': 3.0,
+            'Test3': 3.0,
+            'passing_test_0': 3.0,
+            'passing_test_1': 2.0,
+            'passing_test_2': 2.0,
+            'passing_test_3': 2.0,
+            'passing_test_4': 2.0,
+            'passing_test_5': 1.0,
+            'passing_test_6': 1.0,
+            'passing_test_7': 1.0,
+            'passing_test_8': 1.0,
+            'passing_test_9': 0.5,
+        }
+    }
+    temp_file = tempfile.NamedTemporaryFile(delete=False)
+    temp_file.close()
+    temp_file_name = temp_file.name
+    with open(temp_file_name, 'w') as f:
+      json.dump(mock_test_results, f)
+    return temp_file_name
+
+  @mock.patch('telemetry.internal.util.binary_manager.InitDependencyManager')
+  def testSplittingShardsByTimes(self, _):
+    temp_file_name = self.writeMockTestResultsFile()
+    # It seems that the sorting order of the first four tests above is:
+    #   passing_test_0, Test1, Test2, Test3
+    # This is probably because the relative order of the "fixed" tests
+    # (starting with "Test") and the generated ones ("passing_") is
+    # not well defined, and the sorting is stable afterward. The
+    # expectations have been adjusted for this fact.
+    try:
+      self.baseShardingTest(
+          4, 0, [],
+          ['passing_test_0', 'passing_test_1',
+           'passing_test_5', 'passing_test_9'],
+          temp_file_name)
+      self.baseShardingTest(
+          4, 1, [],
+          ['Test1', 'passing_test_2', 'passing_test_6'],
+          temp_file_name)
+      self.baseShardingTest(
+          4, 2, [],
+          ['Test2', 'passing_test_3', 'passing_test_7'],
+          temp_file_name)
+      self.baseShardingTest(
+          4, 3, [],
+          ['Test3', 'passing_test_4', 'passing_test_8'],
+          temp_file_name)
+    finally:
+      os.remove(temp_file_name)
+
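
The expected shard assignments above are consistent with a simple scheme: order the tests by recorded time, longest first (with ties left in whatever order the sort happens to produce, as the in-code comment notes), then deal them out round-robin. Given the observed tie order passing_test_0, Test1, Test2, Test3 among the four 3.0-second tests, round-robin dealing reproduces all four expected shards exactly. A sketch of that scheme, with a hypothetical helper name; the real logic inside browser_test_runner may differ:

```python
def _AssignTestsByTime(test_names, test_times, total_shards, default_time):
  # Sort by descending recorded time; tests missing from the abbreviated
  # results fall back to a default (plausibly the median, via
  # _MedianTestTime, which is tested below).
  by_time = sorted(test_names,
                   key=lambda name: test_times.get(name, default_time),
                   reverse=True)
  shards = [[] for _ in xrange(total_shards)]
  for position, name in enumerate(by_time):
    shards[position % total_shards].append(name)
  return shards
```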
+  @mock.patch('telemetry.internal.util.binary_manager.InitDependencyManager')
+  def testFilteringAfterSharding(self, _):
+    temp_file_name = self.writeMockTestResultsFile()
+    try:
+      self.baseShardingTest(
+          4, 1, [],
+          ['Test1', 'passing_test_2', 'passing_test_6'],
+          temp_file_name,
+          opt_test_filter='(Test1|passing_test_2|passing_test_6)',
+          opt_filter_tests_after_sharding=True)
+    finally:
+      os.remove(temp_file_name)
+
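
The flag being exercised is an ordering question: by default --test-filter is applied before the test list is split into shards, while --filter-tests-after-sharding defers it until after this shard's slice has been taken. The test relies on that: shard 1's unfiltered slice is exactly Test1, passing_test_2 and passing_test_6, so filtering afterwards with a regex matching those three names changes nothing, whereas filtering first would leave only three tests overall to spread across four shards. A sketch of the two orderings, reusing the range-based sharding sketch from above for brevity (the time-based path would follow the same order of operations):

```python
import re

def _TestsForShard(all_tests, total_shards, shard_index,
                   test_filter, filter_tests_after_sharding):
  def _ApplyFilter(tests):
    return [t for t in tests if re.match(test_filter, t)]

  if not filter_tests_after_sharding:
    all_tests = _ApplyFilter(all_tests)      # default: filter, then shard
  start, end = _TestRangeForShard(total_shards, shard_index, len(all_tests))
  shard_tests = all_tests[start:end]
  if filter_tests_after_sharding:
    shard_tests = _ApplyFilter(shard_tests)  # flag set: shard, then filter
  return shard_tests
```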
+  @mock.patch('telemetry.internal.util.binary_manager.InitDependencyManager')
+  def testMedianComputation(self, _):
+    self.assertEquals(2.0, browser_test_runner._MedianTestTime(
+        {'test1': 2.0, 'test2': 7.0, 'test3': 1.0}))
+    self.assertEquals(2.0, browser_test_runner._MedianTestTime(
+        {'test1': 2.0}))
+    self.assertEquals(0.0, browser_test_runner._MedianTestTime({}))
+    self.assertEqual(4.0, browser_test_runner._MedianTestTime(
+        {'test1': 2.0, 'test2': 6.0, 'test3': 1.0, 'test4': 8.0}))
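
All four assertions are satisfied by a conventional median over the recorded times: 0.0 for an empty dict, the middle value for odd counts, and the mean of the two middle values for even counts (hence 4.0 for {1.0, 2.0, 6.0, 8.0}). A sketch consistent with the assertions; the actual _MedianTestTime may differ in detail:

```python
def _MedianTestTime(test_times):
  times = sorted(test_times.values())
  if not times:
    return 0.0
  middle = len(times) / 2  # integer division (Python 2)
  if len(times) % 2:
    return times[middle]
  return 0.5 * (times[middle - 1] + times[middle])
```

Given the --read-abbreviated-json-results-from flow above, a plausible use for this median is as the assumed runtime of tests that have no recorded history when splitting shards by time.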
+