#!/usr/bin/env python
# Copyright (c) 2012 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
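
"""Tests for run_test_cases.py."""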

import json
import logging
import os
import re
import shutil
import subprocess
import sys
import tempfile
import unittest
from xml.dom import minidom

ROOT_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
sys.path.insert(0, ROOT_DIR)
sys.path.append(os.path.join(ROOT_DIR, 'tests', 'gtest_fake'))

import gtest_fake_base
import run_test_cases


def RunTest(arguments):
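  """Runs run_test_cases.py with |arguments| in a subprocess.

  Returns (stdout, stderr, returncode) with line endings normalized to '\n'.
  """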
  cmd = [
      sys.executable,
      os.path.join(ROOT_DIR, 'run_test_cases.py'),
  ] + arguments

  logging.debug(' '.join(cmd))
  # Do not use universal_newlines=True since run_test_cases uses CR
  # extensively.
  proc = subprocess.Popen(
      cmd,
      stdout=subprocess.PIPE,
      stderr=subprocess.PIPE)

  # pylint is confused.
  out, err = proc.communicate() or ('', '')
  if sys.platform == 'win32':
    # Downgrade CRLF to LF.
    out = out.replace('\r\n', '\n')
    err = err.replace('\r\n', '\n')
  else:
    # Upgrade CR to LF.
    out = out.replace('\r', '\n')
    err = err.replace('\r', '\n')

  return (out, err, proc.returncode)


def trim_xml_whitespace(data):
  """Recursively removes whitespace-only text nodes and comment nodes."""
  children_to_remove = []
  for child in data.childNodes:
    if child.nodeType == minidom.Node.TEXT_NODE and not child.data.strip():
      children_to_remove.append(child)
    elif child.nodeType == minidom.Node.COMMENT_NODE:
      children_to_remove.append(child)
    elif child.nodeType == minidom.Node.ELEMENT_NODE:
      trim_xml_whitespace(child)
  for child in children_to_remove:
    data.removeChild(child)


def load_xml_as_string_and_filter(filepath):
  """Serializes XML to a list of consistently formatted strings (ignoring
  whitespace between elements) so that it may be compared.
  """
  with open(filepath, 'rb') as f:
    xml = minidom.parse(f)

  trim_xml_whitespace(xml)

  # Replace the variable timing values with constants. This is hardcoded to
  # the node layout of expected.xml.
  xml.childNodes[0].attributes['time'] = "0.1"
  xml.childNodes[0].attributes['timestamp'] = "1996"
  xml.childNodes[0].childNodes[0].childNodes[0].attributes['time'] = "0.2"
  xml.childNodes[0].childNodes[0].childNodes[1].attributes['time'] = "0.2"
  xml.childNodes[0].childNodes[0].childNodes[2].attributes['time'] = "0.2"
  return xml.toprettyxml(indent='  ').splitlines()


def get_test_re(test_name, failed, duration):
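  """Returns regexps matching the output of a single fake test case, as
  generated by gtest_fake_base.get_test_output().
  """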
  return [
      re.escape(i)
      for i in gtest_fake_base.get_test_output(
          test_name, failed, duration).splitlines()
  ] + ['']


def get_footer_re(nb):
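  """Returns regexps matching the gtest footer for |nb| test cases."""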
  return [
      re.escape(i) for i in gtest_fake_base.get_footer(nb, nb).splitlines()
  ] + ['']


def get_whole_test_re(test_name, failed, duration, with_filter):
  """Generates a list of regexps matching the lines of a test case output."""
  out = []
  if with_filter:
    out.append(re.escape('Note: Google Test filter = %s' % with_filter))
    out.append('')
    nb = with_filter.count(':') + 1
  else:
    nb = 1
  out.extend(get_test_re(test_name, failed, duration))
  out.extend(get_footer_re(nb))
  out.append('')
  return out


class RunTestCases(unittest.TestCase):
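  """Integration tests running run_test_cases.py against the fake gtest
  executables in tests/gtest_fake/.
  """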
  def setUp(self):
    super(RunTestCases, self).setUp()
    # Make sure there is no environment variable that could cause side
    # effects.
    os.environ.pop('GTEST_SHARD_INDEX', '')
    os.environ.pop('GTEST_TOTAL_SHARDS', '')
    self._tempdirpath = None
    self._tempfilename = None

  def tearDown(self):
    if self._tempdirpath and os.path.exists(self._tempdirpath):
      shutil.rmtree(self._tempdirpath)
    if self._tempfilename and os.path.exists(self._tempfilename):
      os.remove(self._tempfilename)
    super(RunTestCases, self).tearDown()

  @property
  def tempdirpath(self):
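    """Creates the temporary directory lazily, on first access."""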
    if not self._tempdirpath:
      self._tempdirpath = tempfile.mkdtemp(prefix='run_test_cases')
    return self._tempdirpath

  @property
  def filename(self):
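    """Creates an empty temporary file on first access and returns its path."""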
    if not self._tempfilename:
      handle, self._tempfilename = tempfile.mkstemp(
          prefix='run_test_cases', suffix='.run_test_cases')
      os.close(handle)
    return self._tempfilename

  def _check_results(self, expected_out_re, out, err):
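    """Asserts that each line of |out| matches the corresponding regexp in
    |expected_out_re|, with no output left over and an empty |err|.
    """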
    lines = out.splitlines()

    for index in range(len(expected_out_re)):
      if not lines:
        self.fail('%s\nerr:\n%s' % ('\n'.join(expected_out_re[index:]), err))
      line = lines.pop(0)
      if not re.match('^%s$' % expected_out_re[index], line):
        self.fail(
            '\nIndex: %d\nExpected: %r\nLine: %r\nNext lines:\n%s\nErr:\n%s' % (
                index,
                expected_out_re[index],
                line,
                '\n'.join(lines[:5]),
                err))
    self.assertEqual([], lines)
    self.assertEqual('', err)

  def _check_results_file(
      self, fail, flaky, success, missing, test_cases, duration):
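    """Asserts that the JSON file written via --result matches expectations."""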
    self.assertTrue(os.path.exists(self.filename))

    with open(self.filename) as f:
      actual = json.load(f)

    self.assertEqual(
        [
            u'duration', u'expected', u'fail', u'flaky', u'missing', u'success',
            u'test_cases',
        ],
        sorted(actual))

    if duration:
      self.assertTrue(actual['duration'] > 0.0000001)
    else:
      self.assertEqual(actual['duration'], 0)
    self.assertEqual(fail, actual['fail'])
    self.assertEqual(flaky, actual['flaky'])
    self.assertEqual(missing, actual['missing'])
    self.assertEqual(success, actual['success'])
    self.assertEqual(len(test_cases), len(actual['test_cases']))
    for (entry_name, entry_count) in test_cases:
      self.assertTrue(entry_name in actual['test_cases'])
      self.assertEqual(
          entry_count,
          len(actual['test_cases'][entry_name]),
          (entry_count, len(actual['test_cases'][entry_name]), entry_name))

  def test_simple_pass(self):
    out, err, return_code = RunTest(
        [
            '--clusters', '1',
            '--jobs', '3',
            '--result', self.filename,
            os.path.join(ROOT_DIR, 'tests', 'gtest_fake', 'gtest_fake_pass.py'),
        ])

    self.assertEqual(0, return_code)

    expected_out_re = [
        r'\[\d/\d\] \d\.\d\ds .+',
        r'\[\d/\d\] \d\.\d\ds .+',
        r'\[\d/\d\] \d\.\d\ds .+',
        re.escape('Summary:'),
        re.escape(' Success: 3 100.00% ') + r' +\d+\.\d\ds',
        re.escape(' Flaky: 0 0.00% ') + r' +\d+\.\d\ds',
        re.escape(' Fail: 0 0.00% ') + r' +\d+\.\d\ds',
        r' \d+\.\d\ds Done running 3 tests with 3 executions. \d+\.\d\d test/s',
    ]
    self._check_results(expected_out_re, out, err)

    test_cases = [
        ('Foo.Bar1', 1),
        ('Foo.Bar2', 1),
        ('Foo.Bar/3', 1),
    ]
    self._check_results_file(
        fail=[],
        flaky=[],
        missing=[],
        success=sorted([u'Foo.Bar1', u'Foo.Bar2', u'Foo.Bar/3']),
        test_cases=test_cases,
        duration=True)

  def test_simple_pass_cluster(self):
    # In milliseconds.
    for duration in (10, 100, 1000):
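      # The timing checks below are inherently flaky on a loaded machine, so
      # on AssertionError retry with a slower fake test; only the slowest
      # setting is allowed to fail for real.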
      try:
        out, err, return_code = RunTest(
            [
                '--clusters', '10',
                '--jobs', '1',
                '--result', self.filename,
                os.path.join(
                    ROOT_DIR, 'tests', 'gtest_fake', 'gtest_fake_slow.py'),
                str(duration),
            ])

        self.assertEqual(0, return_code)

        expected_out_re = [
            r'\[\d/\d\] (\d\.\d\d)s .+',
            r'\[\d/\d\] (\d\.\d\d)s .+',
            r'\[\d/\d\] (\d\.\d\d)s .+',
            re.escape('Summary:'),
            re.escape(' Success: 3 100.00% ') + r' +\d+\.\d\ds',
            re.escape(' Flaky: 0 0.00% ') + r' +\d+\.\d\ds',
            re.escape(' Fail: 0 0.00% ') + r' +\d+\.\d\ds',
            r' \d+\.\d\ds Done running 3 tests with 3 executions. '
            r'\d+\.\d\d test/s',
        ]
        self._check_results(expected_out_re, out, err)
        # Now specifically assert that the first 3 lines have monotonically
        # increasing values, which would not happen if the results were not
        # streamed.
        lines = out.splitlines()
        values = [re.match(expected_out_re[i], lines[i]) for i in range(3)]
        self.assertTrue(all(values))
        values = [float(m.group(1)) for m in values]
        self.assertTrue(values[0] < values[1])
        self.assertTrue(values[1] < values[2])

        test_cases = [
            ('Foo.Bar1', 1),
            ('Foo.Bar2', 1),
            ('Foo.Bar/3', 1),
        ]
        self._check_results_file(
            fail=[],
            flaky=[],
            missing=[],
            success=sorted([u'Foo.Bar1', u'Foo.Bar2', u'Foo.Bar/3']),
            test_cases=test_cases,
            duration=True)
      except AssertionError:
        if duration != 1000:
          print('Trying more slowly')
          continue
        raise

  def test_simple_pass_verbose(self):
    # We take verbosity seriously, so test it.
    out, err, return_code = RunTest(
        [
            # Linearize execution.
            '--clusters', '1',
            '--jobs', '1',
            '--verbose',
            '--result', self.filename,
            os.path.join(ROOT_DIR, 'tests', 'gtest_fake', 'gtest_fake_pass.py'),
        ])

    self.assertEqual(0, return_code)

    expected_out_re = []
    test_cases = (
        'Foo.Bar2',
        'Foo.Bar1',
        'Foo.Bar/3',
    )

    for index, name in enumerate(test_cases):
      expected_out_re.append(
          r'\[%d/3\] \d\.\d\ds ' % (index + 1) + re.escape(name) + ' .+')
      expected_out_re.extend(get_whole_test_re(name, False, '100', name))

    expected_out_re.extend([
        re.escape('Summary:'),
        re.escape(' Success: 3 100.00%') + r' +\d+\.\d\ds',
        re.escape(' Flaky: 0 0.00%') + r' +\d+\.\d\ds',
        re.escape(' Fail: 0 0.00%') + r' +\d+\.\d\ds',
        r' \d+\.\d\ds Done running 3 tests with 3 executions. \d+\.\d\d test/s',
    ])
    self._check_results(expected_out_re, out, '')
    # Test 'err' manually.
    self.assertTrue(
        re.match(
            r'INFO run_test_cases\(\d+\)\: Found 3 test cases in \S+ '
            r'\S+gtest_fake_pass.py',
            err.strip()),
        err)

  def test_simple_pass_very_verbose(self):
    # We take verbosity seriously, so test it. Ensure that the test still
    # passes with maximum verbosity, but don't worry about what the output is.
    _, err, return_code = RunTest(
        [
            # Linearize execution.
            '--clusters', '1',
            '--jobs', '1',
            '--verbose',
            '--verbose',
            '--verbose',
            '--result', self.filename,
            os.path.join(ROOT_DIR, 'tests', 'gtest_fake', 'gtest_fake_pass.py'),
        ])

    self.assertEqual(0, return_code, err)

  def test_simple_fail(self):
    out, err, return_code = RunTest(
        [
            # Linearize execution.
            '--clusters', '1',
            '--jobs', '1',
            '--result', self.filename,
            os.path.join(ROOT_DIR, 'tests', 'gtest_fake', 'gtest_fake_fail.py'),
        ])

    self.assertEqual(1, return_code)

    test_failure_output = get_whole_test_re('Baz.Fail', True, '100', 'Baz.Fail')
    expected_out_re = [
        r'\[1/\d\] \d\.\d\ds .+',
        r'\[2/\d\] \d\.\d\ds .+',
        r'\[3/\d\] \d\.\d\ds .+',
        r'\[4/\d\] \d\.\d\ds .+',
    ] + test_failure_output + [
        # Retries.
        r'\[5/\d\] \d\.\d\ds .+ retry \#1',
    ] + test_failure_output + [
        re.escape(l) for l in run_test_cases.running_serial_warning()
    ] + [
        r'\[6/\d\] \d\.\d\ds .+ retry \#2',
    ] + test_failure_output + [
        re.escape('Failed tests:'),
        re.escape(' Baz.Fail'),
        re.escape('Summary:'),
        re.escape(' Success: 3 75.00%') + r' +\d+\.\d\ds',
        re.escape(' Flaky: 0 0.00%') + r' +\d+\.\d\ds',
        re.escape(' Fail: 1 25.00%') + r' +\d+\.\d\ds',
        r' \d+\.\d\ds Done running 4 tests with 6 executions. \d+\.\d\d test/s',
    ]
    self._check_results(expected_out_re, out, err)

    test_cases = [
        ('Foo.Bar1', 1),
        ('Foo.Bar2', 1),
        ('Foo.Bar3', 1),
        ('Baz.Fail', 3),
    ]
    self._check_results_file(
        fail=['Baz.Fail'],
        flaky=[],
        missing=[],
        success=[u'Foo.Bar1', u'Foo.Bar2', u'Foo.Bar3'],
        test_cases=test_cases,
        duration=True)

  def test_simple_fail_verbose(self):
    # We take verbosity seriously, so test it.
    out, err, return_code = RunTest(
        [
            # Linearize execution.
            '--clusters', '1',
            '--jobs', '1',
            '--verbose',
            '--result', self.filename,
            os.path.join(ROOT_DIR, 'tests', 'gtest_fake', 'gtest_fake_fail.py'),
        ])

    self.assertEqual(1, return_code)

    expected_out_re = []
    test_cases = (
        'Foo.Bar3',
        'Foo.Bar1',
        'Foo.Bar2',
        'Baz.Fail',
        'Baz.Fail',
        'Baz.Fail',
    )

    for index, name in enumerate(test_cases):
      if index + 1 == len(test_cases):
        # We are about to retry the test serially, so check for the warning.
        expected_out_re.extend(
            re.escape(l) for l in run_test_cases.running_serial_warning())
      expected_out_re.append(
          r'\[%d/\d\] \d\.\d\ds ' % (index + 1) + re.escape(name) + ' .+')
      expected_out_re.extend(
          get_whole_test_re(name, 'Fail' in name, '100', name))

    expected_out_re.extend([
        re.escape('Failed tests:'),
        re.escape(' Baz.Fail'),
        re.escape('Summary:'),
        re.escape(' Success: 3 75.00%') + r' +\d+\.\d\ds',
        re.escape(' Flaky: 0 0.00%') + r' +\d+\.\d\ds',
        re.escape(' Fail: 1 25.00%') + r' +\d+\.\d\ds',
        r' \d+\.\d\ds Done running 4 tests with 6 executions. \d+\.\d\d test/s',
    ])
    self._check_results(expected_out_re, out, '')

    # Test 'err' manually.
    self.assertTrue(
        re.match(
            r'INFO run_test_cases\(\d+\)\: Found 4 test cases in \S+ '
            r'\S+gtest_fake_fail.py',
            err.strip()),
        err)
    test_cases = [
        ('Foo.Bar1', 1),
        ('Foo.Bar2', 1),
        ('Foo.Bar3', 1),
        ('Baz.Fail', 3),
    ]
    self._check_results_file(
        fail=['Baz.Fail'],
        flaky=[],
        missing=[],
        success=[u'Foo.Bar1', u'Foo.Bar2', u'Foo.Bar3'],
        test_cases=test_cases,
        duration=True)

  def test_simple_gtest_list_error(self):
    out, err, return_code = RunTest(
        [
            '--no-dump',
            os.path.join(ROOT_DIR, 'tests', 'gtest_fake', 'gtest_fake_error.py'),
        ])

    expected_out_re = [
        'Failed to list test cases',
        'Failed to run %s %s --gtest_list_tests' % (
            re.escape(sys.executable),
            re.escape(
                os.path.join(
                    ROOT_DIR, 'tests', 'gtest_fake', 'gtest_fake_error.py'))),
        'stdout:',
        '',
        'stderr:',
        'Unable to list tests',
    ]

    self.assertEqual(1, return_code)
    self._check_results(expected_out_re, out, err)

  def test_gtest_list_tests(self):
    out, err, return_code = RunTest(
        [
            '--gtest_list_tests',
            os.path.join(ROOT_DIR, 'tests', 'gtest_fake', 'gtest_fake_fail.py'),
        ])

    expected_out = (
        'Foo.\n Bar1\n Bar2\n Bar3\nBaz.\n Fail\n'
        ' YOU HAVE 2 tests with ignored failures (FAILS prefix)\n\n')
    self.assertEqual('', err)
    self.assertEqual(expected_out, out)
    self.assertEqual(0, return_code)

  def test_flaky_stop_early(self):
    # gtest_fake_flaky.py has Foo.Bar[1-9]. Each test fails once and succeeds
    # on the second pass.
    out, err, return_code = RunTest(
        [
            '--result', self.filename,
            # Linearize execution.
            '--clusters', '1',
            '--jobs', '1',
            '--retries', '1',
            '--max-failures', '2',
            os.path.join(
                ROOT_DIR, 'tests', 'gtest_fake', 'gtest_fake_flaky.py'),
            self.tempdirpath,
        ])
    self.assertEqual(1, return_code)
    # Give up on checking the stdout.
    self.assertTrue('STOPPED EARLY' in out, out)
    self.assertEqual('', err)
    # The order is determined by the test shuffling.
    test_cases = [
        ('Foo.Bar1', 1),
        ('Foo.Bar4', 1),
        ('Foo.Bar5', 1),
    ]
    self._check_results_file(
        fail=[u'Foo.Bar1', u'Foo.Bar4', u'Foo.Bar5'],
        flaky=[],
        missing=[
            u'Foo.Bar2', u'Foo.Bar3', u'Foo.Bar6', u'Foo.Bar7', u'Foo.Bar8',
            u'Foo.Bar9',
        ],
        success=[],
        test_cases=test_cases,
        duration=True)

  def test_flaky_stop_early_xml(self):
    # Create a unique filename and delete the file.
    os.remove(self.filename)
    _, err, return_code = RunTest(
        [
            # In that case, it's an XML file even if it has the wrong
            # extension.
            '--gtest_output=xml:' + self.filename,
            '--no-dump',
            # Linearize execution.
            '--clusters', '1',
            '--jobs', '1',
            '--retries', '1',
            '--max-failures', '2',
            os.path.join(
                ROOT_DIR, 'tests', 'gtest_fake', 'gtest_fake_flaky.py'),
            self.tempdirpath,
        ])
    self.assertEqual(1, return_code)
    # Give up on checking the stdout.
    #self.assertTrue('STOPPED EARLY' in out, out)
    self.assertEqual('', err)
    try:
      actual_xml = load_xml_as_string_and_filter(self.filename)
    except Exception as e:
      print >> sys.stderr, e
      print >> sys.stderr, self.filename
      with open(self.filename, 'rb') as f:
        print >> sys.stderr, f.read()
      self.fail()
    expected_xml = load_xml_as_string_and_filter(
        os.path.join(ROOT_DIR, 'tests', 'gtest_fake', 'expected.xml'))
    self.assertEqual(expected_xml, actual_xml)

  def test_missing(self):
    out, err, return_code = RunTest(
        [
            '--clusters', '10',
            '--jobs', '1',
            '--result', self.filename,
            os.path.join(
                ROOT_DIR, 'tests', 'gtest_fake', 'gtest_fake_missing.py'),
            self.tempdirpath,
        ])

    self.assertEqual(1, return_code)

    expected_out_re = [
        r'\[1\/4\] \d\.\d\ds Foo\.Bar1 \(\d+\.\d+s\)',
        # Only the header is included since the test was not run alone.
        re.escape('Note: Google Test filter = Foo.Bar3:Foo.Bar2:Foo.Bar1'),
        '',
        re.escape('[==========] Running 1 test from 1 test case.'),
        re.escape('[----------] Global test environment set-up.'),
        re.escape('[ RUN ] Foo.Bar1'),
        re.escape('[ FAILED ] Foo.Bar1 (100 ms)'),
        '',
        r'\[2/4\] \d\.\d\ds Foo\.Bar3 \(\d+\.\d+s\) *',
        r'\[3/5\] \d\.\d\ds Foo\.Bar2 \<unknown\> *',
        r'\[4/5\] \d\.\d\ds Foo\.Bar1 \(\d+\.\d+s\) \- retry \#1',

        # Both the header and the footer are included since the test was run
        # alone.
    ] + get_whole_test_re('Foo.Bar1', False, '100', 'Foo.Bar1') + [
        r'\[5/6\] \d\.\d\ds Foo\.Bar2 \<unknown\> \- retry \#1 *',
    ] + [
        re.escape(l) for l in run_test_cases.running_serial_warning()
    ] + [
        r'\[6/6\] \d\.\d\ds Foo\.Bar2 \<unknown\> \- retry \#2 *',
        re.escape('Flaky tests:'),
        re.escape(' Foo.Bar1 (tried 2 times)'),
        re.escape('Failed tests:'),
        re.escape(' Foo.Bar2'),
        re.escape('Summary:'),
        re.escape(' Success: 1 33.33% ') + r' +\d+\.\d\ds',
        re.escape(' Flaky: 1 33.33% ') + r' +\d+\.\d\ds',
        re.escape(' Fail: 1 33.33% ') + r' +\d+\.\d\ds',
        r' \d+\.\d\ds Done running 3 tests with 6 executions. '
        r'\d+\.\d\d test/s',
    ]
    self._check_results(expected_out_re, out, err)
    test_cases = [
        ('Foo.Bar1', 2),
        ('Foo.Bar2', 3),
        ('Foo.Bar3', 1),
    ]
    self._check_results_file(
        fail=[u'Foo.Bar2'],
        flaky=[u'Foo.Bar1'],
        missing=[],
        success=[u'Foo.Bar3'],
        test_cases=test_cases,
        duration=True)

  def test_crash_after_pass(self):
    out, err, return_code = RunTest(
        [
            '--clusters', '10',
            '--jobs', '1',
            '--result', self.filename,
            os.path.join(
                ROOT_DIR, 'tests', 'gtest_fake',
                'gtest_fake_crash_after_pass.py'),
            self.tempdirpath,
        ])

    self.assertEqual(0, return_code)

    expected_out_re = [
        r'\[1\/2\] \d\.\d\ds Foo\.Bar1 \(\d+\.\d+s\)',
        r'\[2\/2\] \d\.\d\ds Foo\.Bar2 \(\d+\.\d+s\) *',
        # Dumping the whole thing:
        re.escape('Note: Google Test filter = Foo.Bar2:Foo.Bar1'),
        '',
        # The stack trace is included.
    ] + (get_test_re('Foo.Bar1', False, '100') +
         get_test_re('Foo.Bar2', False, '100') +
         get_footer_re(2)) + [
        re.escape('OMG I crashed'),
        re.escape('Here\'s a stack trace'),

        # Now resume normal retries.
        r'\[3/4\] \d\.\d\ds Foo\.Bar1 \(\d+\.\d+s\) \- retry \#1',
        re.escape('Note: Google Test filter = Foo.Bar1'),
        '',
        # Note there is no crash here due to the flakiness implementation in
        # gtest_fake_crash_after_pass.py.
    ] + get_test_re('Foo.Bar1', False, '100') + get_footer_re(1) + [
        '',

        r'\[4/4\] \d\.\d\ds Foo\.Bar2 \(\d+\.\d+s\) \- retry \#1',
        re.escape('Note: Google Test filter = Foo.Bar2'),
        '',
        # Note there is no crash here due to the flakiness implementation in
        # gtest_fake_crash_after_pass.py.
    ] + get_test_re('Foo.Bar2', False, '100') + get_footer_re(1) + [
        '',

        re.escape('Flaky tests:'),
        re.escape(' Foo.Bar1 (tried 2 times)'),
        re.escape(' Foo.Bar2 (tried 2 times)'),
        re.escape('Summary:'),
        re.escape(' Success: 0 0.00% 0.00s'),
        re.escape(' Flaky: 2 100.00% ') + r' +\d+\.\d\ds',
        re.escape(' Fail: 0 0.00% 0.00s'),
        r' \d+\.\d\ds Done running 2 tests with 4 executions. '
        r'\d+\.\d\d test/s',
    ]
    self._check_results(expected_out_re, out, err)
    test_cases = [
        ('Foo.Bar1', 2),
        ('Foo.Bar2', 2),
    ]
    self._check_results_file(
        fail=[],
        flaky=[u'Foo.Bar1', u'Foo.Bar2'],
        missing=[],
        success=[],
        test_cases=test_cases,
        duration=True)

  def test_confused_pass(self):
    # The test case reports that it passed but the process exit code is 1.
    out, err, return_code = RunTest(
        [
            '--result', self.filename,
            os.path.join(
                ROOT_DIR, 'tests', 'gtest_fake', 'gtest_fake_confused_pass.py'),
        ])

    self.assertEqual(1, return_code)

    expected_out_re = [
        r'\[1\/1\] \d\.\d\ds Foo\.Bar1 \(\d+\.\d\ds\)',
        # TODO(maruel): Figure out why 2 empty lines are stripped off.
    ] + get_whole_test_re('Foo.Bar1', False, '100', 'Foo.Bar1')[:-2] + [
        r'\[2\/2\] \d\.\d\ds Foo\.Bar1 \(\d+\.\d\ds\) - retry #1',
    ] + get_whole_test_re('Foo.Bar1', False, '100', 'Foo.Bar1') + [
        re.escape(l) for l in run_test_cases.running_serial_warning()
    ] + [
        r'\[3\/3\] \d\.\d\ds Foo\.Bar1 \(\d+\.\d\ds\) - retry #2',
    ] + get_whole_test_re('Foo.Bar1', False, '100', 'Foo.Bar1') + [
        re.escape('Failed tests:'),
        re.escape(' Foo.Bar1'),
        re.escape('Summary:'),
        re.escape(' Success: 0 0.00% 0.00s'),
        re.escape(' Flaky: 0 0.00% 0.00s'),
        re.escape(' Fail: 1 100.00% ') + r' +\d+\.\d\ds',
        r' \d+\.\d\ds Done running 1 tests with 3 executions. \d+\.\d\d test/s',
    ]
    self._check_results(expected_out_re, out, err)

    test_cases = [
        ('Foo.Bar1', 3),
    ]
    self._check_results_file(
        fail=[u'Foo.Bar1'],
        flaky=[],
        missing=[],
        success=[],
        test_cases=test_cases,
        duration=True)

  def test_confused_fail(self):
    # The test case prints that it failed but the process exit code is 0.
    out, err, return_code = RunTest(
        [
            '--result', self.filename,
            os.path.join(
                ROOT_DIR, 'tests', 'gtest_fake', 'gtest_fake_confused_fail.py'),
        ])

    self.assertEqual(1, return_code)

    expected_out_re = [
        r'\[1\/2\] \d\.\d\ds Foo\.Bar1 \(\d+\.\d\ds\)',
    ] + get_whole_test_re('Foo.Bar1', True, '100', 'Foo.Bar1') + [
        r'\[2\/3\] \d\.\d\ds Foo\.Bar1 \(\d+\.\d\ds\) - retry #1',
    ] + get_whole_test_re('Foo.Bar1', True, '100', 'Foo.Bar1') + [
        re.escape(l) for l in run_test_cases.running_serial_warning()
    ] + [
        r'\[3\/3\] \d\.\d\ds Foo\.Bar1 \(\d+\.\d\ds\) - retry #2',
    ] + get_whole_test_re('Foo.Bar1', True, '100', 'Foo.Bar1') + [
        re.escape('Failed tests:'),
        re.escape(' Foo.Bar1'),
        re.escape('Summary:'),
        re.escape(' Success: 0 0.00% 0.00s'),
        re.escape(' Flaky: 0 0.00% 0.00s'),
        re.escape(' Fail: 1 100.00% ') + r' +\d+\.\d\ds',
        r' \d+\.\d\ds Done running 1 tests with 3 executions. \d+\.\d\d test/s',
    ]
    self._check_results(expected_out_re, out, err)

    test_cases = [
        ('Foo.Bar1', 3),
    ]
    self._check_results_file(
        fail=[u'Foo.Bar1'],
        flaky=[],
        missing=[],
        success=[],
        test_cases=test_cases,
        duration=True)

  def test_gtest_filter(self):
    out, err, return_code = RunTest(
        [
            '--gtest_filter=Foo.Bar1:Foo.Bar/*',
            '--result', self.filename,
            os.path.join(ROOT_DIR, 'tests', 'gtest_fake', 'gtest_fake_pass.py'),
        ])

    self.assertEqual(0, return_code, (out, err))

    expected_out_re = [
        r'\[\d/\d\] \d\.\d\ds .+',
        r'\[\d/\d\] \d\.\d\ds .+',
        re.escape('Summary:'),
        re.escape(' Success: 2 100.00% ') + r' +\d+\.\d\ds',
        re.escape(' Flaky: 0 0.00% ') + r' +\d+\.\d\ds',
        re.escape(' Fail: 0 0.00% ') + r' +\d+\.\d\ds',
        r' \d+\.\d\ds Done running 2 tests with 2 executions. \d+\.\d\d test/s',
    ]
    self._check_results(expected_out_re, out, err)

    test_cases = [
        ('Foo.Bar1', 1),
        ('Foo.Bar/3', 1),
    ]
    self._check_results_file(
        fail=[],
        flaky=[],
        missing=[],
        success=sorted([u'Foo.Bar1', u'Foo.Bar/3']),
        test_cases=test_cases,
        duration=True)

  def test_gtest_filter_missing(self):
    out, err, return_code = RunTest(
        [
            '--gtest_filter=Not.Present',
            '--result', self.filename,
            os.path.join(ROOT_DIR, 'tests', 'gtest_fake', 'gtest_fake_pass.py'),
        ])

    self.assertEqual(1, return_code, (out, err))
    expected_out_re = ['Found no test to run']
    self._check_results(expected_out_re, out, err)
    self._check_results_file(
        fail=[],
        flaky=[],
        missing=[],
        success=[],
        test_cases=[],
        duration=False)


if __name__ == '__main__':
  VERBOSE = '-v' in sys.argv
  logging.basicConfig(level=logging.DEBUG if VERBOSE else logging.ERROR)
  unittest.main()