| OLD | NEW |
| 1 # Copyright 2015 The Chromium Authors. All rights reserved. | 1 # Copyright 2015 The Chromium Authors. All rights reserved. |
| 2 # Use of this source code is governed by a BSD-style license that can be | 2 # Use of this source code is governed by a BSD-style license that can be |
| 3 # found in the LICENSE file. | 3 # found in the LICENSE file. |
| 4 | 4 |
| 5 from collections import defaultdict |
| 5 import json | 6 import json |
| 6 | 7 |
| 7 from recipe_engine.config import Dict | 8 from recipe_engine.config import Dict |
| 9 from recipe_engine.config import List |
| 8 from recipe_engine.config import Single | 10 from recipe_engine.config import Single |
| 9 from recipe_engine.recipe_api import Property | 11 from recipe_engine.recipe_api import Property |
| 10 | 12 |
| 11 | 13 |
| 12 DEPS = [ | 14 DEPS = [ |
| 13 'adb', | 15 'adb', |
| 14 'depot_tools/bot_update', | 16 'depot_tools/bot_update', |
| 15 'chromium', | 17 'chromium', |
| 16 'chromium_android', | 18 'chromium_android', |
| 17 'chromium_tests', | 19 'chromium_tests', |
| (...skipping 28 matching lines...) Expand all Loading... |
| 46 'tests': Property( | 48 'tests': Property( |
| 47 kind=Dict(value_type=list), | 49 kind=Dict(value_type=list), |
| 48 help='The failed tests, the test name should be full name, e.g.: {' | 50 help='The failed tests, the test name should be full name, e.g.: {' |
| 49 ' "browser_tests": [' | 51 ' "browser_tests": [' |
| 50 ' "suite.test1", "suite.test2"' | 52 ' "suite.test1", "suite.test2"' |
| 51 ' ]' | 53 ' ]' |
| 52 '}'), | 54 '}'), |
| 53 'use_analyze': Property( | 55 'use_analyze': Property( |
| 54 kind=Single(bool, empty_val=False, required=False), default=True, | 56 kind=Single(bool, empty_val=False, required=False), default=True, |
| 55 help='Use analyze to skip commits that do not affect tests.'), | 57 help='Use analyze to skip commits that do not affect tests.'), |
| 58 'suspected_revisions': Property( |
| 59 kind=List(basestring), default=[], |
| 60 help='A list of suspected revisions from heuristic analysis.'), |
| 56 } | 61 } |
| 57 | 62 |
| 58 | 63 |
| 59 class TestResult(object): | 64 class TestResult(object): |
| 60 SKIPPED = 'skipped' # A commit doesn't impact the test. | 65 SKIPPED = 'skipped' # A commit doesn't impact the test. |
| 61 PASSED = 'passed' # The compile or test passed. | 66 PASSED = 'passed' # The compile or test passed. |
| 62 FAILED = 'failed' # The compile or test failed. | 67 FAILED = 'failed' # The compile or test failed. |
| 63 INFRA_FAILED = 'infra_failed' # Infra failed. | 68 INFRA_FAILED = 'infra_failed' # Infra failed. |
| 64 | 69 |
| 65 | 70 |
| (...skipping 61 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... |
| 127 override_bot_type='builder_tester') | 132 override_bot_type='builder_tester') |
| 128 | 133 |
| 129 # Run the tests. | 134 # Run the tests. |
| 130 with api.chromium_tests.wrap_chromium_tests( | 135 with api.chromium_tests.wrap_chromium_tests( |
| 131 bot_config, actual_tests_to_run): | 136 bot_config, actual_tests_to_run): |
| 132 failed_tests = api.test_utils.run_tests( | 137 failed_tests = api.test_utils.run_tests( |
| 133 api, actual_tests_to_run, | 138 api, actual_tests_to_run, |
| 134 suffix=revision, test_filters=requested_tests) | 139 suffix=revision, test_filters=requested_tests) |
| 135 | 140 |
| 136 # Process failed tests. | 141 # Process failed tests. |
| 142 failed_tests_dict = defaultdict(list) |
| 137 for failed_test in failed_tests: | 143 for failed_test in failed_tests: |
| 138 valid = failed_test.has_valid_results(api, suffix=revision) | 144 valid = failed_test.has_valid_results(api, suffix=revision) |
| 139 results[failed_test.name] = { | 145 results[failed_test.name] = { |
| 140 'status': TestResult.FAILED, | 146 'status': TestResult.FAILED, |
| 141 'valid': valid, | 147 'valid': valid, |
| 142 } | 148 } |
| 143 if valid: | 149 if valid: |
| 144 results[failed_test.name]['failures'] = list( | 150 test_list = list(failed_test.failures(api, suffix=revision)) |
| 145 failed_test.failures(api, suffix=revision)) | 151 results[failed_test.name]['failures'] = test_list |
| 152 failed_tests_dict[failed_test.name].extend(test_list) |
| 146 | 153 |
| 147 # Process passed tests. | 154 # Process passed tests. |
| 148 for test in actual_tests_to_run: | 155 for test in actual_tests_to_run: |
| 149 if test not in failed_tests: | 156 if test not in failed_tests: |
| 150 results[test.name] = { | 157 results[test.name] = { |
| 151 'status': TestResult.PASSED, | 158 'status': TestResult.PASSED, |
| 152 'valid': True, | 159 'valid': True, |
| 153 } | 160 } |
| 154 | 161 |
| 155 # Process skipped tests in two scenarios: | 162 # Process skipped tests in two scenarios: |
| 156 # 1. Skipped by "analyze": tests are not affected by the given revision. | 163 # 1. Skipped by "analyze": tests are not affected by the given revision. |
| 157 # 2. Skipped because the requested tests don't exist at the given revision. | 164 # 2. Skipped because the requested tests don't exist at the given revision. |
| 158 for test_name in requested_tests.keys(): | 165 for test_name in requested_tests.keys(): |
| 159 if test_name not in results: | 166 if test_name not in results: |
| 160 results[test_name] = { | 167 results[test_name] = { |
| 161 'status': TestResult.SKIPPED, | 168 'status': TestResult.SKIPPED, |
| 162 'valid': True, | 169 'valid': True, |
| 163 } | 170 } |
| 164 | 171 |
| 165 return results | 172 return results, failed_tests_dict |
| 166 | 173 |
| 167 | 174 |
| 168 def RunSteps(api, target_mastername, target_testername, | 175 def _get_reduced_test_dict(original_test_dict, failed_tests_dict): |
| 169 good_revision, bad_revision, tests, use_analyze): | 176 # Remove tests that are in both dicts from the original test dict. |
| 177 if not failed_tests_dict: |
| 178 return original_test_dict |
| 179 reduced_dict = defaultdict(list) |
| 180 for step, tests in original_test_dict.iteritems(): |
| 181 if step in failed_tests_dict: |
| 182 for test in tests: |
| 183 if test not in failed_tests_dict[step]: |
| 184 reduced_dict[step].append(test) |
| 185 else: |
| 186 reduced_dict[step].extend(tests) |
| 187 return reduced_dict |
| 188 |
| 189 |
| 190 def RunSteps(api, target_mastername, target_testername, good_revision, |
| 191 bad_revision, tests, use_analyze, suspected_revisions): |
| 170 assert tests, 'No failed tests were specified.' | 192 assert tests, 'No failed tests were specified.' |
| 171 | 193 |
| 172 # Figure out which builder configuration we should match for compile config. | 194 # Figure out which builder configuration we should match for compile config. |
| 173 # Sometimes, the builder itself runs the tests and there is no tester. In | 195 # Sometimes, the builder itself runs the tests and there is no tester. In |
| 174 # such cases, just treat the builder as a "tester". Thus, we default to | 196 # such cases, just treat the builder as a "tester". Thus, we default to |
| 175 # the target tester. | 197 # the target tester. |
| 176 tester_config = api.chromium_tests.builders.get( | 198 tester_config = api.chromium_tests.builders.get( |
| 177 target_mastername).get('builders', {}).get(target_testername) | 199 target_mastername).get('builders', {}).get(target_testername) |
| 178 target_buildername = (tester_config.get('parent_buildername') or | 200 target_buildername = (tester_config.get('parent_buildername') or |
| 179 target_testername) | 201 target_testername) |
| (...skipping 11 matching lines...) Expand all Loading... |
| 191 api.swarming.set_default_dimension(key, value) | 213 api.swarming.set_default_dimension(key, value) |
| 192 # TODO(stgao): Fix the issue that precommit=False adds the tag 'purpose:CI'. | 214 # TODO(stgao): Fix the issue that precommit=False adds the tag 'purpose:CI'. |
| 193 api.chromium_tests.configure_swarming('chromium', precommit=False) | 215 api.chromium_tests.configure_swarming('chromium', precommit=False) |
| 194 | 216 |
| 195 # Sync to bad revision, and retrieve revisions in the regression range. | 217 # Sync to bad revision, and retrieve revisions in the regression range. |
| 196 api.chromium_tests.prepare_checkout( | 218 api.chromium_tests.prepare_checkout( |
| 197 bot_config, | 219 bot_config, |
| 198 root_solution_revision=bad_revision) | 220 root_solution_revision=bad_revision) |
| 199 revisions_to_check = api.findit.revisions_between(good_revision, bad_revision) | 221 revisions_to_check = api.findit.revisions_between(good_revision, bad_revision) |
| 200 | 222 |
| 223 suspected_revision_index = [ |
| 224 revisions_to_check.index(r) |
| 225 for r in set(suspected_revisions) if r in revisions_to_check] |
| 226 |
| 227 # Segments revisions_to_check by suspected_revisions. |
| 228 # Each sub_range will contain following elements: |
| 229 # 1. Revision before a suspected_revision or None as a placeholder |
| 230 # when no such revision |
| 231 # 2. Suspected_revision |
| 232 # 3. Revisions between a suspected_revision and the revision before next |
| 233 # suspected_revision, or remaining revisions before all suspected_revisions. |
| 234 # For example, if revisions_to_check are [r0, r1, ..., r6] and |
| 235 # suspected_revisions are [r2, r5], sub_ranges will be: |
| 236 # [[None, r0], [r1, r2, r3], [r4, r5, r6]] |
| 237 if suspected_revision_index: |
| 238 # If there are consecutive revisions being suspected, include them |
| 239 # in the same sub_range by only saving the oldest revision. |
| 240 suspected_revision_index = [i for i in suspected_revision_index |
| 241 if i - 1 not in suspected_revision_index] |
| 242 sub_ranges = [] |
| 243 remaining_revisions = revisions_to_check[:] |
| 244 for index in sorted(suspected_revision_index, reverse=True): |
| 245 if index > 0: |
| 246 sub_ranges.append(remaining_revisions[index - 1:]) |
| 247 remaining_revisions = remaining_revisions[:index - 1] |
| 248 # None is a placeholder for the last known good revision. |
| 249 sub_ranges.append([None] + remaining_revisions) |
| 250 else: |
| 251 # Treats the entire regression range as a single sub-range. |
| 252 sub_ranges = [[None] + revisions_to_check] |
| 253 |
| 201 test_results = {} | 254 test_results = {} |
| 202 try_job_metadata = { | 255 try_job_metadata = { |
| 203 'regression_range_size': len(revisions_to_check) | 256 'regression_range_size': len(revisions_to_check) |
| 204 } | 257 } |
| 205 report = { | 258 report = { |
| 206 'result': test_results, | 259 'result': test_results, |
| 207 'metadata': try_job_metadata | 260 'metadata': try_job_metadata |
| 208 } | 261 } |
| 209 | 262 |
| 210 revision_being_checked = None | 263 revision_being_checked = None |
| 211 try: | 264 try: |
| 212 # We compile & run tests from the first revision to the last revision in the | 265 culprits = defaultdict(dict) |
| 213 # regression range serially instead of a typical bisecting, because jumping | 266 # Tests that haven't found culprits in tested revision(s). |
| 214 # between new and old revisions might affect Goma capacity and build cycle | 267 tests_have_not_found_culprit = tests |
| 215 # times. Thus we plan to go with this simple serial approach first so that | 268 # Iterates through sub_ranges and find culprits for each failed test. |
| 216 # compile would be fast due to incremental compile. | 269 # Sub-ranges with newer revisions are tested first so we have better chance |
| 217 # If this won't work out, we will figure out a better solution for speed of | 270 # that try job will reproduce exactly the same failure as in waterfall. |
| 218 # both compile and test. | 271 for sub_range in sub_ranges: |
| 219 for current_revision in revisions_to_check: | 272 if not tests_have_not_found_culprit: # All tests have found culprits. |
| 220 revision_being_checked = current_revision | 273 break |
| 221 test_results[current_revision] = _compile_and_test_at_revision( | 274 |
| 222 api, target_mastername, target_buildername, target_testername, | 275 # The revision right before the suspected revision provided by |
| 223 current_revision, tests, use_analyze) | 276 # the heuristic result. |
| 224 # TODO(http://crbug.com/566975): check whether culprits for all failed | 277 potential_green_rev = sub_range[0] |
| 225 # tests are found and stop running tests at later revisions if so. | 278 following_revisions = sub_range[1:] |
| 279 if potential_green_rev: |
| 280 revision_being_checked = potential_green_rev |
| 281 test_results[potential_green_rev], tests_failed_in_potential_green = ( |
| 282 _compile_and_test_at_revision( |
| 283 api, target_mastername, target_buildername, target_testername, |
| 284 potential_green_rev, tests_have_not_found_culprit, use_analyze)) |
| 285 else: |
| 286 tests_failed_in_potential_green = {} |
| 287 |
| 288 tests_passed_in_potential_green = _get_reduced_test_dict( |
| 289 tests_have_not_found_culprit, tests_failed_in_potential_green) |
| 290 |
| 291 # Culprits for tests that failed in potential green should be earlier, so |
| 292 # removes passed tests and only runs failed ones in following revisions. |
| 293 if tests_passed_in_potential_green: |
| 294 tests_to_run = tests_passed_in_potential_green |
| 295 for revision in following_revisions: |
| 296 revision_being_checked = revision |
| 297 # Since tests_to_run are tests that passed in previous revision, |
| 298 # whichever test that fails now will find current revision is the |
| 299 # culprit. |
| 300 test_results[revision], tests_failed_in_revision = ( |
| 301 _compile_and_test_at_revision( |
| 302 api, target_mastername, target_buildername, target_testername, |
| 303 revision, tests_to_run, use_analyze)) |
| 304 |
| 305 # Removes tests that passed in potential green and failed in |
| 306 # following revisions: culprits have been found for them. |
| 307 tests_have_not_found_culprit = _get_reduced_test_dict( |
| 308 tests_have_not_found_culprit, tests_failed_in_revision) |
| 309 |
| 310 # Only runs tests that have not found culprits in later revisions. |
| 311 tests_to_run = _get_reduced_test_dict( |
| 312 tests_to_run, tests_failed_in_revision) |
| 313 |
| 314 # Records found culprits. |
| 315 for step, test_list in tests_failed_in_revision.iteritems(): |
| 316 for test in test_list: |
| 317 culprits[step][test] = revision |
| 318 |
| 319 if not tests_to_run: |
| 320 break |
| 321 |
| 226 except api.step.InfraFailure: | 322 except api.step.InfraFailure: |
| 227 test_results[revision_being_checked] = TestResult.INFRA_FAILED | 323 test_results[revision_being_checked] = TestResult.INFRA_FAILED |
| 228 report['metadata']['infra_failure'] = True | 324 report['metadata']['infra_failure'] = True |
| 229 raise | 325 raise |
| 230 finally: | 326 finally: |
| 327 if culprits: |
| 328 report['culprits'] = culprits |
| 329 |
| 231 # Give the full report including test results and metadata. | 330 # Give the full report including test results and metadata. |
| 232 step_result = api.python.succeeding_step( | 331 step_result = api.python.succeeding_step( |
| 233 'report', [json.dumps(report, indent=2)], as_log='report') | 332 'report', [json.dumps(report, indent=2)], as_log='report') |
| 234 | 333 |
| 235 # Set the report as a build property too, so that it will be reported back | 334 # Set the report as a build property too, so that it will be reported back |
| 236 # to Buildbucket and Findit will pull from there instead of buildbot master. | 335 # to Buildbucket and Findit will pull from there instead of buildbot master. |
| 237 step_result.presentation.properties['report'] = report | 336 step_result.presentation.properties['report'] = report |
| 238 | 337 |
| 239 return report | 338 return report |
| 240 | 339 |
| 241 | 340 |
| 242 def GenTests(api): | 341 def GenTests(api): |
| 243 def props(tests, platform_name, tester_name, use_analyze=False): | 342 def props( |
| 343 tests, platform_name, tester_name, use_analyze=False, good_revision=None, |
| 344 bad_revision=None, suspected_revisions=None): |
| 244 properties = { | 345 properties = { |
| 245 'mastername': 'tryserver.chromium.%s' % platform_name, | 346 'mastername': 'tryserver.chromium.%s' % platform_name, |
| 246 'buildername': '%s_chromium_variable' % platform_name, | 347 'buildername': '%s_chromium_variable' % platform_name, |
| 247 'slavename': 'build1-a1', | 348 'slavename': 'build1-a1', |
| 248 'buildnumber': 1, | 349 'buildnumber': 1, |
| 249 'target_mastername': 'chromium.%s' % platform_name, | 350 'target_mastername': 'chromium.%s' % platform_name, |
| 250 'target_testername': tester_name, | 351 'target_testername': tester_name, |
| 251 'good_revision': 'r0', | 352 'good_revision': good_revision or 'r0', |
| 252 'bad_revision': 'r1', | 353 'bad_revision': bad_revision or 'r1', |
| 253 'tests': tests, | 354 'tests': tests, |
| 254 'use_analyze': use_analyze, | 355 'use_analyze': use_analyze, |
| 255 } | 356 } |
| 357 if suspected_revisions: |
| 358 properties['suspected_revisions'] = suspected_revisions |
| 256 return api.properties(**properties) + api.platform.name(platform_name) | 359 return api.properties(**properties) + api.platform.name(platform_name) |
| 257 | 360 |
| 258 def simulated_gtest_output(failed_test_names=(), passed_test_names=()): | 361 def simulated_gtest_output(failed_test_names=(), passed_test_names=()): |
| 259 cur_iteration_data = {} | 362 cur_iteration_data = {} |
| 260 for test_name in failed_test_names: | 363 for test_name in failed_test_names: |
| 261 cur_iteration_data[test_name] = [{ | 364 cur_iteration_data[test_name] = [{ |
| 262 'elapsed_time_ms': 0, | 365 'elapsed_time_ms': 0, |
| 263 'output_snippet': '', | 366 'output_snippet': '', |
| 264 'status': 'FAILURE', | 367 'status': 'FAILURE', |
| 265 }] | 368 }] |
| (...skipping 203 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... |
| 469 ], | 572 ], |
| 470 }, | 573 }, |
| 471 })) + | 574 })) + |
| 472 api.override_step_data( | 575 api.override_step_data( |
| 473 'test r1.gl_tests (r1) on Mac-10.9', | 576 'test r1.gl_tests (r1) on Mac-10.9', |
| 474 simulated_gtest_output(passed_test_names=['Test.One']) | 577 simulated_gtest_output(passed_test_names=['Test.One']) |
| 475 ) | 578 ) |
| 476 ) | 579 ) |
| 477 | 580 |
| 478 yield ( | 581 yield ( |
| 582 api.test('findit_culprit_in_last_sub_range') + |
| 583 props( |
| 584 {'gl_tests': ['Test.One']}, 'mac', 'Mac10.9 Tests', use_analyze=False, |
| 585 good_revision='r0', bad_revision='r6', suspected_revisions=['r3']) + |
| 586 api.override_step_data('test r2.read test spec', api.json.output({ |
| 587 'Mac10.9 Tests': { |
| 588 'gtest_tests': [ |
| 589 { |
| 590 'test': 'gl_tests', |
| 591 'swarming': {'can_use_on_swarming_builders': True}, |
| 592 }, |
| 593 ], |
| 594 }, |
| 595 })) + |
| 596 api.override_step_data('test r3.read test spec', api.json.output({ |
| 597 'Mac10.9 Tests': { |
| 598 'gtest_tests': [ |
| 599 { |
| 600 'test': 'gl_tests', |
| 601 'swarming': {'can_use_on_swarming_builders': True}, |
| 602 }, |
| 603 ], |
| 604 }, |
| 605 })) + |
| 606 api.override_step_data( |
| 607 'git commits in range', |
| 608 api.raw_io.stream_output( |
| 609 '\n'.join('r%d' % i for i in reversed(range(1, 7))))) + |
| 610 api.override_step_data( |
| 611 'test r2.gl_tests (r2) on Mac-10.9', |
| 612 simulated_gtest_output(passed_test_names=['Test.One'])) + |
| 613 api.override_step_data( |
| 614 'test r3.gl_tests (r3) on Mac-10.9', |
| 615 simulated_gtest_output(failed_test_names=['Test.One'])) |
| 616 ) |
| 617 |
| 618 yield ( |
| 619 api.test('findit_culprit_in_middle_sub_range') + |
| 620 props( |
| 621 {'gl_tests': ['Test.One']}, 'mac', 'Mac10.9 Tests', use_analyze=False, |
| 622 good_revision='r0', bad_revision='r6', |
| 623 suspected_revisions=['r3', 'r6']) + |
| 624 api.override_step_data('test r2.read test spec', api.json.output({ |
| 625 'Mac10.9 Tests': { |
| 626 'gtest_tests': [ |
| 627 { |
| 628 'test': 'gl_tests', |
| 629 'swarming': {'can_use_on_swarming_builders': True}, |
| 630 }, |
| 631 ], |
| 632 }, |
| 633 })) + |
| 634 api.override_step_data('test r3.read test spec', api.json.output({ |
| 635 'Mac10.9 Tests': { |
| 636 'gtest_tests': [ |
| 637 { |
| 638 'test': 'gl_tests', |
| 639 'swarming': {'can_use_on_swarming_builders': True}, |
| 640 }, |
| 641 ], |
| 642 }, |
| 643 })) + |
| 644 api.override_step_data('test r5.read test spec', api.json.output({ |
| 645 'Mac10.9 Tests': { |
| 646 'gtest_tests': [ |
| 647 { |
| 648 'test': 'gl_tests', |
| 649 'swarming': {'can_use_on_swarming_builders': True}, |
| 650 }, |
| 651 ], |
| 652 }, |
| 653 })) + |
| 654 api.override_step_data('test r6.read test spec', api.json.output({ |
| 655 'Mac10.9 Tests': { |
| 656 'gtest_tests': [ |
| 657 { |
| 658 'test': 'gl_tests', |
| 659 'swarming': {'can_use_on_swarming_builders': True}, |
| 660 }, |
| 661 ], |
| 662 }, |
| 663 })) + |
| 664 api.override_step_data( |
| 665 'git commits in range', |
| 666 api.raw_io.stream_output( |
| 667 '\n'.join('r%d' % i for i in reversed(range(1, 7))))) + |
| 668 api.override_step_data( |
| 669 'test r2.gl_tests (r2) on Mac-10.9', |
| 670 simulated_gtest_output(passed_test_names=['Test.One'])) + |
| 671 api.override_step_data( |
| 672 'test r3.gl_tests (r3) on Mac-10.9', |
| 673 simulated_gtest_output(failed_test_names=['Test.One'])) + |
| 674 api.override_step_data( |
| 675 'test r5.gl_tests (r5) on Mac-10.9', |
| 676 simulated_gtest_output(passed_test_names=['Test.One'])) + |
| 677 api.override_step_data( |
| 678 'test r6.gl_tests (r6) on Mac-10.9', |
| 679 simulated_gtest_output(passed_test_names=['Test.One'])) |
| 680 ) |
| 681 |
| 682 yield ( |
| 683 api.test('findit_culprit_in_first_sub_range') + |
| 684 props( |
| 685 {'gl_tests': ['Test.One']}, 'mac', 'Mac10.9 Tests', use_analyze=False, |
| 686 good_revision='r0', bad_revision='r6', |
| 687 suspected_revisions=['r6']) + |
| 688 api.override_step_data('test r1.read test spec', api.json.output({ |
| 689 'Mac10.9 Tests': { |
| 690 'gtest_tests': [ |
| 691 { |
| 692 'test': 'gl_tests', |
| 693 'swarming': {'can_use_on_swarming_builders': True}, |
| 694 }, |
| 695 ], |
| 696 }, |
| 697 })) + |
| 698 api.override_step_data('test r5.read test spec', api.json.output({ |
| 699 'Mac10.9 Tests': { |
| 700 'gtest_tests': [ |
| 701 { |
| 702 'test': 'gl_tests', |
| 703 'swarming': {'can_use_on_swarming_builders': True}, |
| 704 }, |
| 705 ], |
| 706 }, |
| 707 })) + |
| 708 api.override_step_data('test r6.read test spec', api.json.output({ |
| 709 'Mac10.9 Tests': { |
| 710 'gtest_tests': [ |
| 711 { |
| 712 'test': 'gl_tests', |
| 713 'swarming': {'can_use_on_swarming_builders': True}, |
| 714 }, |
| 715 ], |
| 716 }, |
| 717 })) + |
| 718 api.override_step_data( |
| 719 'git commits in range', |
| 720 api.raw_io.stream_output( |
| 721 '\n'.join('r%d' % i for i in reversed(range(1, 7))))) + |
| 722 api.override_step_data( |
| 723 'test r1.gl_tests (r1) on Mac-10.9', |
| 724 simulated_gtest_output(failed_test_names=['Test.One'])) + |
| 725 api.override_step_data( |
| 726 'test r5.gl_tests (r5) on Mac-10.9', |
| 727 simulated_gtest_output(passed_test_names=['Test.One'])) + |
| 728 api.override_step_data( |
| 729 'test r6.gl_tests (r6) on Mac-10.9', |
| 730 simulated_gtest_output(passed_test_names=['Test.One'])) |
| 731 ) |
| 732 |
| 733 yield ( |
| 734 api.test('findit_steps_multiple_culprits') + |
| 735 props( |
| 736 {'gl_tests': ['Test.gl_One'], 'browser_tests': ['Test.browser_One']}, |
| 737 'mac', 'Mac10.9 Tests', use_analyze=False, |
| 738 good_revision='r0', bad_revision='r6', |
| 739 suspected_revisions=['r3', 'r6']) + |
| 740 api.override_step_data('test r2.read test spec', api.json.output({ |
| 741 'Mac10.9 Tests': { |
| 742 'gtest_tests': [ |
| 743 { |
| 744 'test': 'gl_tests', |
| 745 'swarming': {'can_use_on_swarming_builders': True}, |
| 746 }, |
| 747 { |
| 748 'test': 'browser_tests', |
| 749 'swarming': {'can_use_on_swarming_builders': True}, |
| 750 }, |
| 751 ], |
| 752 }, |
| 753 })) + |
| 754 api.override_step_data('test r3.read test spec', api.json.output({ |
| 755 'Mac10.9 Tests': { |
| 756 'gtest_tests': [ |
| 757 { |
| 758 'test': 'gl_tests', |
| 759 'swarming': {'can_use_on_swarming_builders': True}, |
| 760 }, |
| 761 { |
| 762 'test': 'browser_tests', |
| 763 'swarming': {'can_use_on_swarming_builders': True}, |
| 764 }, |
| 765 ], |
| 766 }, |
| 767 })) + |
| 768 api.override_step_data('test r5.read test spec', api.json.output({ |
| 769 'Mac10.9 Tests': { |
| 770 'gtest_tests': [ |
| 771 { |
| 772 'test': 'gl_tests', |
| 773 'swarming': {'can_use_on_swarming_builders': True}, |
| 774 }, |
| 775 { |
| 776 'test': 'browser_tests', |
| 777 'swarming': {'can_use_on_swarming_builders': True}, |
| 778 }, |
| 779 ], |
| 780 }, |
| 781 })) + |
| 782 api.override_step_data('test r6.read test spec', api.json.output({ |
| 783 'Mac10.9 Tests': { |
| 784 'gtest_tests': [ |
| 785 { |
| 786 'test': 'gl_tests', |
| 787 'swarming': {'can_use_on_swarming_builders': True}, |
| 788 }, |
| 789 { |
| 790 'test': 'browser_tests', |
| 791 'swarming': {'can_use_on_swarming_builders': True}, |
| 792 }, |
| 793 ], |
| 794 }, |
| 795 })) + |
| 796 api.override_step_data( |
| 797 'git commits in range', |
| 798 api.raw_io.stream_output( |
| 799 '\n'.join('r%d' % i for i in reversed(range(1, 7))))) + |
| 800 api.override_step_data( |
| 801 'test r5.gl_tests (r5) on Mac-10.9', |
| 802 simulated_gtest_output(failed_test_names=['Test.gl_One'])) + |
| 803 api.override_step_data( |
| 804 'test r5.browser_tests (r5) on Mac-10.9', |
| 805 simulated_gtest_output(passed_test_names=['Test.browser_One'])) + |
| 806 api.override_step_data( |
| 807 'test r6.browser_tests (r6) on Mac-10.9', |
| 808 simulated_gtest_output(failed_test_names=['Test.browser_One'])) + |
| 809 api.override_step_data( |
| 810 'test r2.gl_tests (r2) on Mac-10.9', |
| 811 simulated_gtest_output(passed_test_names=['Test.gl_One'])) + |
| 812 api.override_step_data( |
| 813 'test r3.gl_tests (r3) on Mac-10.9', |
| 814 simulated_gtest_output(failed_test_names=['Test.gl_One'])) |
| 815 ) |
| 816 |
| 817 yield ( |
| 818 api.test('findit_tests_multiple_culprits') + |
| 819 props( |
| 820 {'gl_tests': ['Test.One', 'Test.Two', 'Test.Three']}, |
| 821 'mac', 'Mac10.9 Tests', use_analyze=False, |
| 822 good_revision='r0', bad_revision='r6', |
| 823 suspected_revisions=['r3', 'r5']) + |
| 824 api.override_step_data('test r2.read test spec', api.json.output({ |
| 825 'Mac10.9 Tests': { |
| 826 'gtest_tests': [ |
| 827 { |
| 828 'test': 'gl_tests', |
| 829 'swarming': {'can_use_on_swarming_builders': True}, |
| 830 }, |
| 831 ], |
| 832 }, |
| 833 })) + |
| 834 api.override_step_data('test r3.read test spec', api.json.output({ |
| 835 'Mac10.9 Tests': { |
| 836 'gtest_tests': [ |
| 837 { |
| 838 'test': 'gl_tests', |
| 839 'swarming': {'can_use_on_swarming_builders': True}, |
| 840 }, |
| 841 ], |
| 842 }, |
| 843 })) + |
| 844 api.override_step_data('test r4.read test spec', api.json.output({ |
| 845 'Mac10.9 Tests': { |
| 846 'gtest_tests': [ |
| 847 { |
| 848 'test': 'gl_tests', |
| 849 'swarming': {'can_use_on_swarming_builders': True}, |
| 850 }, |
| 851 ], |
| 852 }, |
| 853 })) + |
| 854 api.override_step_data('test r5.read test spec', api.json.output({ |
| 855 'Mac10.9 Tests': { |
| 856 'gtest_tests': [ |
| 857 { |
| 858 'test': 'gl_tests', |
| 859 'swarming': {'can_use_on_swarming_builders': True}, |
| 860 }, |
| 861 ], |
| 862 }, |
| 863 })) + |
| 864 api.override_step_data('test r6.read test spec', api.json.output({ |
| 865 'Mac10.9 Tests': { |
| 866 'gtest_tests': [ |
| 867 { |
| 868 'test': 'gl_tests', |
| 869 'swarming': {'can_use_on_swarming_builders': True}, |
| 870 }, |
| 871 ], |
| 872 }, |
| 873 })) + |
| 874 api.override_step_data( |
| 875 'git commits in range', |
| 876 api.raw_io.stream_output( |
| 877 '\n'.join('r%d' % i for i in reversed(range(1, 7))))) + |
| 878 api.override_step_data( |
| 879 'test r4.gl_tests (r4) on Mac-10.9', |
| 880 simulated_gtest_output(passed_test_names=['Test.One', 'Test.Three'], |
| 881 failed_test_names=['Test.Two'])) + |
| 882 api.override_step_data( |
| 883 'test r5.gl_tests (r5) on Mac-10.9', |
| 884 simulated_gtest_output(passed_test_names=['Test.One'], |
| 885 failed_test_names=['Test.Three'])) + |
| 886 api.override_step_data( |
| 887 'test r6.gl_tests (r6) on Mac-10.9', |
| 888 simulated_gtest_output(failed_test_names=['Test.One'])) + |
| 889 api.override_step_data( |
| 890 'test r2.gl_tests (r2) on Mac-10.9', |
| 891 simulated_gtest_output(passed_test_names=['Test.Two'])) + |
| 892 api.override_step_data( |
| 893 'test r3.gl_tests (r3) on Mac-10.9', |
| 894 simulated_gtest_output(failed_test_names=['Test.Two'])) |
| 895 ) |
| 896 |
| 897 |
| 898 yield ( |
| 899 api.test('findit_consecutive_culprits') + |
| 900 props( |
| 901 {'gl_tests': ['Test.One']}, |
| 902 'mac', 'Mac10.9 Tests', use_analyze=False, |
| 903 good_revision='r0', bad_revision='r6', |
| 904 suspected_revisions=['r3', 'r4']) + |
| 905 api.override_step_data('test r2.read test spec', api.json.output({ |
| 906 'Mac10.9 Tests': { |
| 907 'gtest_tests': [ |
| 908 { |
| 909 'test': 'gl_tests', |
| 910 'swarming': {'can_use_on_swarming_builders': True}, |
| 911 }, |
| 912 ], |
| 913 }, |
| 914 })) + |
| 915 api.override_step_data('test r3.read test spec', api.json.output({ |
| 916 'Mac10.9 Tests': { |
| 917 'gtest_tests': [ |
| 918 { |
| 919 'test': 'gl_tests', |
| 920 'swarming': {'can_use_on_swarming_builders': True}, |
| 921 }, |
| 922 ], |
| 923 }, |
| 924 })) + |
| 925 api.override_step_data('test r4.read test spec', api.json.output({ |
| 926 'Mac10.9 Tests': { |
| 927 'gtest_tests': [ |
| 928 { |
| 929 'test': 'gl_tests', |
| 930 'swarming': {'can_use_on_swarming_builders': True}, |
| 931 }, |
| 932 ], |
| 933 }, |
| 934 })) + |
| 935 api.override_step_data( |
| 936 'git commits in range', |
| 937 api.raw_io.stream_output( |
| 938 '\n'.join('r%d' % i for i in reversed(range(1, 7))))) + |
| 939 api.override_step_data( |
| 940 'test r4.gl_tests (r4) on Mac-10.9', |
| 941 simulated_gtest_output(failed_test_names=['Test.One'])) + |
| 942 api.override_step_data( |
| 943 'test r2.gl_tests (r2) on Mac-10.9', |
| 944 simulated_gtest_output(passed_test_names=['Test.One'])) + |
| 945 api.override_step_data( |
| 946 'test r3.gl_tests (r3) on Mac-10.9', |
| 947 simulated_gtest_output(passed_test_names=['Test.One'])) |
| 948 ) |
| 949 |
| 950 yield ( |
| 479 api.test('record_infra_failure') + | 951 api.test('record_infra_failure') + |
| 480 props({'gl_tests': ['Test.One']}, 'mac', 'Mac10.9 Tests') + | 952 props({'gl_tests': ['Test.One']}, 'mac', 'Mac10.9 Tests') + |
| 481 api.override_step_data('test r1.read test spec', api.json.output({ | 953 api.override_step_data('test r1.read test spec', api.json.output({ |
| 482 'Mac10.9 Tests': { | 954 'Mac10.9 Tests': { |
| 483 'gtest_tests': [ | 955 'gtest_tests': [ |
| 484 { | 956 { |
| 485 'test': 'gl_tests', | 957 'test': 'gl_tests', |
| 486 'swarming': {'can_use_on_swarming_builders': True}, | 958 'swarming': {'can_use_on_swarming_builders': True}, |
| 487 }, | 959 }, |
| 488 ], | 960 ], |
| 489 }, | 961 }, |
| 490 })) + | 962 })) + |
| 491 api.override_step_data( | 963 api.override_step_data( |
| 492 'test r1.compile', | 964 'test r1.compile', |
| 493 api.json.output({ | 965 api.json.output({ |
| 494 'notice': [ | 966 'notice': [ |
| 495 { | 967 { |
| 496 'infra_status': { | 968 'infra_status': { |
| 497 'ping_status_code': 408, | 969 'ping_status_code': 408, |
| 498 }, | 970 }, |
| 499 }, | 971 }, |
| 500 ], | 972 ], |
| 501 }), | 973 }), |
| 502 retcode=1) | 974 retcode=1) |
| 503 ) | 975 ) |
| OLD | NEW |