Chromium Code Reviews| OLD | NEW |
|---|---|
| 1 # Copyright 2015 The Chromium Authors. All rights reserved. | 1 # Copyright 2015 The Chromium Authors. All rights reserved. |
| 2 # Use of this source code is governed by a BSD-style license that can be | 2 # Use of this source code is governed by a BSD-style license that can be |
| 3 # found in the LICENSE file. | 3 # found in the LICENSE file. |
| 4 | 4 |
| 5 from collections import defaultdict | |
| 5 import json | 6 import json |
| 6 | 7 |
| 7 from recipe_engine.config import Dict | 8 from recipe_engine.config import Dict |
| 9 from recipe_engine.config import List | |
| 8 from recipe_engine.config import Single | 10 from recipe_engine.config import Single |
| 9 from recipe_engine.recipe_api import Property | 11 from recipe_engine.recipe_api import Property |
| 10 | 12 |
| 11 | 13 |
| 12 DEPS = [ | 14 DEPS = [ |
| 13 'adb', | 15 'adb', |
| 14 'depot_tools/bot_update', | 16 'depot_tools/bot_update', |
| 15 'chromium', | 17 'chromium', |
| 16 'chromium_android', | 18 'chromium_android', |
| 17 'chromium_tests', | 19 'chromium_tests', |
| (...skipping 28 matching lines...) Expand all Loading... | |
| 46 'tests': Property( | 48 'tests': Property( |
| 47 kind=Dict(value_type=list), | 49 kind=Dict(value_type=list), |
| 48 help='The failed tests, the test name should be full name, e.g.: {' | 50 help='The failed tests, the test name should be full name, e.g.: {' |
| 49 ' "browser_tests": [' | 51 ' "browser_tests": [' |
| 50 ' "suite.test1", "suite.test2"' | 52 ' "suite.test1", "suite.test2"' |
| 51 ' ]' | 53 ' ]' |
| 52 '}'), | 54 '}'), |
| 53 'use_analyze': Property( | 55 'use_analyze': Property( |
| 54 kind=Single(bool, empty_val=False, required=False), default=True, | 56 kind=Single(bool, empty_val=False, required=False), default=True, |
| 55 help='Use analyze to skip commits that do not affect tests.'), | 57 help='Use analyze to skip commits that do not affect tests.'), |
| 58 'suspected_revisions': Property( | |
| 59 kind=List(basestring), default=[], | |
| 60 help='A list of suspected revisions from heuristic analysis.'), | |
| 56 } | 61 } |
| 57 | 62 |
| 58 | 63 |
| 59 class TestResult(object): | 64 class TestResult(object): |
| 60 SKIPPED = 'skipped' # A commit doesn't impact the test. | 65 SKIPPED = 'skipped' # A commit doesn't impact the test. |
| 61 PASSED = 'passed' # The compile or test passed. | 66 PASSED = 'passed' # The compile or test passed. |
| 62 FAILED = 'failed' # The compile or test failed. | 67 FAILED = 'failed' # The compile or test failed. |
| 63 | 68 |
| 64 | 69 |
| 65 def _compile_and_test_at_revision(api, target_mastername, target_buildername, | 70 def _compile_and_test_at_revision(api, target_mastername, target_buildername, |
| (...skipping 53 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... | |
| 119 bot_config, | 124 bot_config, |
| 120 bot_update_step, | 125 bot_update_step, |
| 121 bot_db, | 126 bot_db, |
| 122 actual_compile_targets, | 127 actual_compile_targets, |
| 123 tests_including_triggered=actual_tests_to_run, | 128 tests_including_triggered=actual_tests_to_run, |
| 124 mb_mastername=target_mastername, | 129 mb_mastername=target_mastername, |
| 125 mb_buildername=target_buildername, | 130 mb_buildername=target_buildername, |
| 126 override_bot_type='builder_tester') | 131 override_bot_type='builder_tester') |
| 127 | 132 |
| 128 # Run the tests. | 133 # Run the tests. |
| 134 failed_tests_dict = defaultdict(list) | |
|
lijeffrey
2016/05/06 18:41:55
nit: move this closer to where it's being used
chanli
2016/05/06 23:39:18
Done.
| |
| 129 with api.chromium_tests.wrap_chromium_tests( | 135 with api.chromium_tests.wrap_chromium_tests( |
| 130 bot_config, actual_tests_to_run): | 136 bot_config, actual_tests_to_run): |
| 131 failed_tests = api.test_utils.run_tests( | 137 failed_tests = api.test_utils.run_tests( |
| 132 api, actual_tests_to_run, | 138 api, actual_tests_to_run, |
| 133 suffix=revision, test_filters=requested_tests) | 139 suffix=revision, test_filters=requested_tests) |
| 134 | 140 |
| 135 # Process failed tests. | 141 # Process failed tests. |
| 136 for failed_test in failed_tests: | 142 for failed_test in failed_tests: |
| 137 valid = failed_test.has_valid_results(api, suffix=revision) | 143 valid = failed_test.has_valid_results(api, suffix=revision) |
| 138 results[failed_test.name] = { | 144 results[failed_test.name] = { |
| 139 'status': TestResult.FAILED, | 145 'status': TestResult.FAILED, |
| 140 'valid': valid, | 146 'valid': valid, |
| 141 } | 147 } |
| 142 if valid: | 148 if valid: |
| 143 results[failed_test.name]['failures'] = list( | 149 test_list = list(failed_test.failures(api, suffix=revision)) |
| 144 failed_test.failures(api, suffix=revision)) | 150 results[failed_test.name]['failures'] = test_list |
| 151 failed_tests_dict[failed_test.name].extend(test_list) | |
| 145 | 152 |
| 146 # Process passed tests. | 153 # Process passed tests. |
| 147 for test in actual_tests_to_run: | 154 for test in actual_tests_to_run: |
| 148 if test not in failed_tests: | 155 if test not in failed_tests: |
| 149 results[test.name] = { | 156 results[test.name] = { |
| 150 'status': TestResult.PASSED, | 157 'status': TestResult.PASSED, |
| 151 'valid': True, | 158 'valid': True, |
| 152 } | 159 } |
| 153 | 160 |
| 154 # Process skipped tests in two scenarios: | 161 # Process skipped tests in two scenarios: |
| 155 # 1. Skipped by "analyze": tests are not affected by the given revision. | 162 # 1. Skipped by "analyze": tests are not affected by the given revision. |
| 156 # 2. Skipped because the requested tests don't exist at the given revision. | 163 # 2. Skipped because the requested tests don't exist at the given revision. |
| 157 for test_name in requested_tests.keys(): | 164 for test_name in requested_tests.keys(): |
| 158 if test_name not in results: | 165 if test_name not in results: |
| 159 results[test_name] = { | 166 results[test_name] = { |
| 160 'status': TestResult.SKIPPED, | 167 'status': TestResult.SKIPPED, |
| 161 'valid': True, | 168 'valid': True, |
| 162 } | 169 } |
| 163 | 170 |
| 164 return results | 171 return results, failed_tests_dict |
| 165 | 172 |
| 166 | 173 |
| 167 def RunSteps(api, target_mastername, target_testername, | 174 def _get_reduced_test_dict(original_test_dict, failed_tests_dict): |
| 168 good_revision, bad_revision, tests, use_analyze): | 175 # Remove tests that are in both dicts from the original test dict. |
| 176 if not failed_tests_dict: | |
| 177 return original_test_dict | |
| 178 reduced_dict = defaultdict(list) | |
| 179 for step, tests in original_test_dict.iteritems(): | |
| 180 if step in failed_tests_dict: | |
| 181 for test in tests: | |
| 182 if test not in failed_tests_dict[step]: | |
| 183 reduced_dict[step].append(test) | |
| 184 else: | |
| 185 reduced_dict[step].extend(tests) | |
| 186 return reduced_dict | |
| 187 | |
| 188 | |
| 189 def RunSteps(api, target_mastername, target_testername, good_revision, | |
| 190 bad_revision, tests, use_analyze, suspected_revisions): | |
| 169 assert tests, 'No failed tests were specified.' | 191 assert tests, 'No failed tests were specified.' |
| 170 | 192 |
| 171 # Figure out which builder configuration we should match for compile config. | 193 # Figure out which builder configuration we should match for compile config. |
| 172 # Sometimes, the builder itself runs the tests and there is no tester. In | 194 # Sometimes, the builder itself runs the tests and there is no tester. In |
| 173 # such cases, just treat the builder as a "tester". Thus, we default to | 195 # such cases, just treat the builder as a "tester". Thus, we default to |
| 174 # the target tester. | 196 # the target tester. |
| 175 tester_config = api.chromium_tests.builders.get( | 197 tester_config = api.chromium_tests.builders.get( |
| 176 target_mastername).get('builders', {}).get(target_testername) | 198 target_mastername).get('builders', {}).get(target_testername) |
| 177 target_buildername = (tester_config.get('parent_buildername') or | 199 target_buildername = (tester_config.get('parent_buildername') or |
| 178 target_testername) | 200 target_testername) |
| (...skipping 11 matching lines...) Expand all Loading... | |
| 190 api.swarming.set_default_dimension(key, value) | 212 api.swarming.set_default_dimension(key, value) |
| 191 # TODO(stgao): Fix the issue that precommit=False adds the tag 'purpose:CI'. | 213 # TODO(stgao): Fix the issue that precommit=False adds the tag 'purpose:CI'. |
| 192 api.chromium_tests.configure_swarming('chromium', precommit=False) | 214 api.chromium_tests.configure_swarming('chromium', precommit=False) |
| 193 | 215 |
| 194 # Sync to bad revision, and retrieve revisions in the regression range. | 216 # Sync to bad revision, and retrieve revisions in the regression range. |
| 195 api.chromium_tests.prepare_checkout( | 217 api.chromium_tests.prepare_checkout( |
| 196 bot_config, | 218 bot_config, |
| 197 root_solution_revision=bad_revision) | 219 root_solution_revision=bad_revision) |
| 198 revisions_to_check = api.findit.revisions_between(good_revision, bad_revision) | 220 revisions_to_check = api.findit.revisions_between(good_revision, bad_revision) |
| 199 | 221 |
| 222 suspected_revision_index = [ | |
| 223 revisions_to_check.index(r) | |
| 224 for r in set(suspected_revisions) if r in revisions_to_check] | |
| 225 | |
| 226 # Segments revisions_to_check by suspected_revisions. | |
| 227 # Each sub_range will contain following elements: | |
| 228 # 1. Revision before a suspected_revision or None as a placeholder | |
| 229 # when no such revision | |
| 230 # 2. Suspected_revision | |
| 231 # 3. Revisions between a suspected_revision and the revision before next | |
| 232 # suspected_revision, or remaining revisions before all suspected_revisions. | |
| 233 # For example, if revisions_to_check are [r0, r1, ..., r6] and | |
| 234 # suspected_revisions are [r2, r5], sub_ranges will be: | |
| 235 # [[None, r0], [r1, r2, r3], [r4, r5, r6]] | |
| 236 if suspected_revision_index: | |
| 237 # If there are consecutive revisions being suspected, include them | |
| 238 # in the same sub_range by only saving the oldest revision. | |
| 239 suspected_revision_index = [i for i in suspected_revision_index | |
| 240 if i - 1 not in suspected_revision_index] | |
| 241 sub_ranges = [] | |
| 242 remaining_revisions = revisions_to_check[:] | |
| 243 for index in sorted(suspected_revision_index, reverse=True): | |
| 244 if index > 0: | |
| 245 sub_ranges.append(remaining_revisions[index - 1:]) | |
| 246 remaining_revisions = remaining_revisions[:index - 1] | |
| 247 # None is a placeholder for the last known good revision. | |
| 248 sub_ranges.append([None] + remaining_revisions) | |
| 249 else: | |
| 250 # Treats the entire regression range as a single sub-range. | |
| 251 sub_ranges = [[None] + revisions_to_check] | |
| 252 | |
| 200 test_results = {} | 253 test_results = {} |
| 201 try_job_metadata = { | 254 try_job_metadata = { |
| 202 'regression_range_size': len(revisions_to_check) | 255 'regression_range_size': len(revisions_to_check) |
| 203 } | 256 } |
| 204 report = { | 257 report = { |
| 205 'result': test_results, | 258 'result': test_results, |
| 206 'metadata': try_job_metadata | 259 'metadata': try_job_metadata |
| 207 } | 260 } |
| 208 | 261 |
| 209 try: | 262 try: |
| 210 # We compile & run tests from the first revision to the last revision in the | 263 culprits = defaultdict(dict) |
| 211 # regression range serially instead of a typical bisecting, because jumping | 264 # Tests that haven't found culprits in tested revision(s). |
| 212 # between new and old revisions might affect Goma capacity and build cycle | 265 tests_have_not_found_culprit = tests |
| 213 # times. Thus we plan to go with this simple serial approach first so that | 266 # Iterates through sub_ranges and find culprits for each failed test. |
| 214 # compile would be fast due to incremental compile. | 267 # Sub-ranges with newer revisions are tested first so we have better chance |
| 215 # If this won't work out, we will figure out a better solution for speed of | 268 # that try job will reproduce exactly the same failure as in waterfall. |
| 216 # both compile and test. | 269 for sub_range in sub_ranges: |
| 217 for current_revision in revisions_to_check: | 270 if not tests_have_not_found_culprit: # All tests have found culprits. |
| 218 test_results[current_revision] = _compile_and_test_at_revision( | 271 break |
| 219 api, target_mastername, target_buildername, target_testername, | 272 |
| 220 current_revision, tests, use_analyze) | 273 # The revision right before the suspected revision provided by |
| 221 # TODO(http://crbug.com/566975): check whether culprits for all failed | 274 # the heuristic result. |
| 222 # tests are found and stop running tests at later revisions if so. | 275 potential_green_rev = sub_range[0] |
| 276 following_revisions = sub_range[1:] | |
| 277 if potential_green_rev: | |
| 278 test_results[potential_green_rev], tests_failed_in_potential_green = ( | |
| 279 _compile_and_test_at_revision( | |
| 280 api, target_mastername, target_buildername, target_testername, | |
| 281 potential_green_rev, tests_have_not_found_culprit, use_analyze)) | |
| 282 else: | |
| 283 tests_failed_in_potential_green = {} | |
| 284 | |
| 285 tests_passed_in_potential_green = _get_reduced_test_dict( | |
| 286 tests_have_not_found_culprit, tests_failed_in_potential_green) | |
| 287 | |
| 288 # Culprits for tests that failed in potential green should be earlier, so | |
| 289 # removes passed tests and only runs failed ones in following revisions. | |
| 290 if tests_passed_in_potential_green: | |
| 291 tests_to_run = tests_passed_in_potential_green | |
| 292 for revision in following_revisions: | |
| 293 # Since tests_to_run are tests that passed in previous revision, | |
| 294 # whichever test that fails now will find current revision is the | |
| 295 # culprit. | |
| 296 test_results[revision], tests_failed_in_revision = ( | |
| 297 _compile_and_test_at_revision( | |
| 298 api, target_mastername, target_buildername, target_testername, | |
| 299 revision, tests_to_run, use_analyze)) | |
| 300 | |
| 301 # Removes tests that passed in potential green and failed in | |
| 302 # following revisions: culprits have been found for them. | |
| 303 tests_have_not_found_culprit = _get_reduced_test_dict( | |
| 304 tests_have_not_found_culprit, tests_failed_in_revision) | |
| 305 | |
| 306 # Only runs tests that have not found culprits in later revisions. | |
| 307 tests_to_run = _get_reduced_test_dict( | |
| 308 tests_to_run, tests_failed_in_revision) | |
| 309 | |
| 310 # Records found culprits. | |
|
lijeffrey
2016/05/06 18:41:55
nit: "down" is not necessary
chanli
2016/05/06 23:39:18
Done.
| |
| 311 for step, test_list in tests_failed_in_revision.iteritems(): | |
| 312 for test in test_list: | |
| 313 culprits[step][test] = revision | |
| 314 | |
| 315 if not tests_to_run: | |
| 316 break | |
| 317 | |
| 223 finally: | 318 finally: |
| 319 if culprits: | |
| 320 report['culprits'] = culprits | |
| 321 | |
| 224 # Give the full report including test results and metadata. | 322 # Give the full report including test results and metadata. |
| 225 step_result = api.python.succeeding_step( | 323 step_result = api.python.succeeding_step( |
| 226 'report', [json.dumps(report, indent=2)], as_log='report') | 324 'report', [json.dumps(report, indent=2)], as_log='report') |
| 227 | 325 |
| 228 # Set the report as a build property too, so that it will be reported back | 326 # Set the report as a build property too, so that it will be reported back |
| 229 # to Buildbucket and Findit will pull from there instead of buildbot master. | 327 # to Buildbucket and Findit will pull from there instead of buildbot master. |
| 230 step_result.presentation.properties['report'] = report | 328 step_result.presentation.properties['report'] = report |
| 231 | 329 |
| 232 return report | 330 return report |
| 233 | 331 |
| 234 | 332 |
| 235 def GenTests(api): | 333 def GenTests(api): |
| 236 def props(tests, platform_name, tester_name, use_analyze=False): | 334 def props( |
| 335 tests, platform_name, tester_name, use_analyze=False, good_revision=None, | |
| 336 bad_revision=None, suspected_revisions=None): | |
| 237 properties = { | 337 properties = { |
| 238 'mastername': 'tryserver.chromium.%s' % platform_name, | 338 'mastername': 'tryserver.chromium.%s' % platform_name, |
| 239 'buildername': '%s_chromium_variable' % platform_name, | 339 'buildername': '%s_chromium_variable' % platform_name, |
| 240 'slavename': 'build1-a1', | 340 'slavename': 'build1-a1', |
| 241 'buildnumber': 1, | 341 'buildnumber': 1, |
| 242 'target_mastername': 'chromium.%s' % platform_name, | 342 'target_mastername': 'chromium.%s' % platform_name, |
| 243 'target_testername': tester_name, | 343 'target_testername': tester_name, |
| 244 'good_revision': 'r0', | 344 'good_revision': good_revision or 'r0', |
| 245 'bad_revision': 'r1', | 345 'bad_revision': bad_revision or 'r1', |
| 246 'tests': tests, | 346 'tests': tests, |
| 247 'use_analyze': use_analyze, | 347 'use_analyze': use_analyze, |
| 248 } | 348 } |
| 349 if suspected_revisions: | |
| 350 properties['suspected_revisions'] = suspected_revisions | |
| 249 return api.properties(**properties) + api.platform.name(platform_name) | 351 return api.properties(**properties) + api.platform.name(platform_name) |
| 250 | 352 |
| 251 def simulated_gtest_output(failed_test_names=(), passed_test_names=()): | 353 def simulated_gtest_output(failed_test_names=(), passed_test_names=()): |
| 252 cur_iteration_data = {} | 354 cur_iteration_data = {} |
| 253 for test_name in failed_test_names: | 355 for test_name in failed_test_names: |
| 254 cur_iteration_data[test_name] = [{ | 356 cur_iteration_data[test_name] = [{ |
| 255 'elapsed_time_ms': 0, | 357 'elapsed_time_ms': 0, |
| 256 'output_snippet': '', | 358 'output_snippet': '', |
| 257 'status': 'FAILURE', | 359 'status': 'FAILURE', |
| 258 }] | 360 }] |
| (...skipping 201 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... | |
| 460 'swarming': {'can_use_on_swarming_builders': True}, | 562 'swarming': {'can_use_on_swarming_builders': True}, |
| 461 }, | 563 }, |
| 462 ], | 564 ], |
| 463 }, | 565 }, |
| 464 })) + | 566 })) + |
| 465 api.override_step_data( | 567 api.override_step_data( |
| 466 'test r1.gl_tests (r1) on Mac-10.9', | 568 'test r1.gl_tests (r1) on Mac-10.9', |
| 467 simulated_gtest_output(passed_test_names=['Test.One']) | 569 simulated_gtest_output(passed_test_names=['Test.One']) |
| 468 ) | 570 ) |
| 469 ) | 571 ) |
| 572 | |
| 573 yield ( | |
| 574 api.test('findit_culprit_in_last_sub_range') + | |
| 575 props( | |
| 576 {'gl_tests': ['Test.One']}, 'mac', 'Mac10.9 Tests', use_analyze=False, | |
| 577 good_revision='r0', bad_revision='r6', suspected_revisions=['r3']) + | |
| 578 api.override_step_data('test r2.read test spec', api.json.output({ | |
| 579 'Mac10.9 Tests': { | |
| 580 'gtest_tests': [ | |
| 581 { | |
| 582 'test': 'gl_tests', | |
| 583 'swarming': {'can_use_on_swarming_builders': True}, | |
| 584 }, | |
| 585 ], | |
| 586 }, | |
| 587 })) + | |
| 588 api.override_step_data('test r3.read test spec', api.json.output({ | |
| 589 'Mac10.9 Tests': { | |
| 590 'gtest_tests': [ | |
| 591 { | |
| 592 'test': 'gl_tests', | |
| 593 'swarming': {'can_use_on_swarming_builders': True}, | |
| 594 }, | |
| 595 ], | |
| 596 }, | |
| 597 })) + | |
| 598 api.override_step_data( | |
| 599 'git commits in range', | |
| 600 api.raw_io.stream_output( | |
| 601 '\n'.join('r%d' % i for i in reversed(range(1, 7))))) + | |
| 602 api.override_step_data( | |
| 603 'test r2.gl_tests (r2) on Mac-10.9', | |
| 604 simulated_gtest_output(passed_test_names=['Test.One'])) + | |
| 605 api.override_step_data( | |
| 606 'test r3.gl_tests (r3) on Mac-10.9', | |
| 607 simulated_gtest_output(failed_test_names=['Test.One'])) | |
| 608 ) | |
| 609 | |
| 610 yield ( | |
| 611 api.test('findit_culprit_in_middle_sub_range') + | |
| 612 props( | |
| 613 {'gl_tests': ['Test.One']}, 'mac', 'Mac10.9 Tests', use_analyze=False, | |
| 614 good_revision='r0', bad_revision='r6', | |
| 615 suspected_revisions=['r3', 'r6']) + | |
| 616 api.override_step_data('test r2.read test spec', api.json.output({ | |
| 617 'Mac10.9 Tests': { | |
| 618 'gtest_tests': [ | |
| 619 { | |
| 620 'test': 'gl_tests', | |
| 621 'swarming': {'can_use_on_swarming_builders': True}, | |
| 622 }, | |
| 623 ], | |
| 624 }, | |
| 625 })) + | |
| 626 api.override_step_data('test r3.read test spec', api.json.output({ | |
| 627 'Mac10.9 Tests': { | |
| 628 'gtest_tests': [ | |
| 629 { | |
| 630 'test': 'gl_tests', | |
| 631 'swarming': {'can_use_on_swarming_builders': True}, | |
| 632 }, | |
| 633 ], | |
| 634 }, | |
| 635 })) + | |
| 636 api.override_step_data('test r5.read test spec', api.json.output({ | |
| 637 'Mac10.9 Tests': { | |
| 638 'gtest_tests': [ | |
| 639 { | |
| 640 'test': 'gl_tests', | |
| 641 'swarming': {'can_use_on_swarming_builders': True}, | |
| 642 }, | |
| 643 ], | |
| 644 }, | |
| 645 })) + | |
| 646 api.override_step_data('test r6.read test spec', api.json.output({ | |
| 647 'Mac10.9 Tests': { | |
| 648 'gtest_tests': [ | |
| 649 { | |
| 650 'test': 'gl_tests', | |
| 651 'swarming': {'can_use_on_swarming_builders': True}, | |
| 652 }, | |
| 653 ], | |
| 654 }, | |
| 655 })) + | |
| 656 api.override_step_data( | |
| 657 'git commits in range', | |
| 658 api.raw_io.stream_output( | |
| 659 '\n'.join('r%d' % i for i in reversed(range(1, 7))))) + | |
| 660 api.override_step_data( | |
| 661 'test r2.gl_tests (r2) on Mac-10.9', | |
| 662 simulated_gtest_output(passed_test_names=['Test.One'])) + | |
| 663 api.override_step_data( | |
| 664 'test r3.gl_tests (r3) on Mac-10.9', | |
| 665 simulated_gtest_output(failed_test_names=['Test.One'])) + | |
| 666 api.override_step_data( | |
| 667 'test r5.gl_tests (r5) on Mac-10.9', | |
| 668 simulated_gtest_output(passed_test_names=['Test.One'])) + | |
| 669 api.override_step_data( | |
| 670 'test r6.gl_tests (r6) on Mac-10.9', | |
| 671 simulated_gtest_output(passed_test_names=['Test.One'])) | |
| 672 ) | |
| 673 | |
| 674 yield ( | |
| 675 api.test('findit_culprit_in_first_sub_range') + | |
| 676 props( | |
| 677 {'gl_tests': ['Test.One']}, 'mac', 'Mac10.9 Tests', use_analyze=False, | |
| 678 good_revision='r0', bad_revision='r6', | |
| 679 suspected_revisions=['r6']) + | |
| 680 api.override_step_data('test r1.read test spec', api.json.output({ | |
| 681 'Mac10.9 Tests': { | |
| 682 'gtest_tests': [ | |
| 683 { | |
| 684 'test': 'gl_tests', | |
| 685 'swarming': {'can_use_on_swarming_builders': True}, | |
| 686 }, | |
| 687 ], | |
| 688 }, | |
| 689 })) + | |
| 690 api.override_step_data('test r5.read test spec', api.json.output({ | |
| 691 'Mac10.9 Tests': { | |
| 692 'gtest_tests': [ | |
| 693 { | |
| 694 'test': 'gl_tests', | |
| 695 'swarming': {'can_use_on_swarming_builders': True}, | |
| 696 }, | |
| 697 ], | |
| 698 }, | |
| 699 })) + | |
| 700 api.override_step_data('test r6.read test spec', api.json.output({ | |
| 701 'Mac10.9 Tests': { | |
| 702 'gtest_tests': [ | |
| 703 { | |
| 704 'test': 'gl_tests', | |
| 705 'swarming': {'can_use_on_swarming_builders': True}, | |
| 706 }, | |
| 707 ], | |
| 708 }, | |
| 709 })) + | |
| 710 api.override_step_data( | |
| 711 'git commits in range', | |
| 712 api.raw_io.stream_output( | |
| 713 '\n'.join('r%d' % i for i in reversed(range(1, 7))))) + | |
| 714 api.override_step_data( | |
| 715 'test r1.gl_tests (r1) on Mac-10.9', | |
| 716 simulated_gtest_output(failed_test_names=['Test.One'])) + | |
| 717 api.override_step_data( | |
| 718 'test r5.gl_tests (r5) on Mac-10.9', | |
| 719 simulated_gtest_output(passed_test_names=['Test.One'])) + | |
| 720 api.override_step_data( | |
| 721 'test r6.gl_tests (r6) on Mac-10.9', | |
| 722 simulated_gtest_output(passed_test_names=['Test.One'])) | |
| 723 ) | |
| 724 | |
| 725 yield ( | |
| 726 api.test('findit_steps_multiple_culprits') + | |
| 727 props( | |
| 728 {'gl_tests': ['Test.gl_One'], 'browser_tests': ['Test.browser_One']}, | |
| 729 'mac', 'Mac10.9 Tests', use_analyze=False, | |
| 730 good_revision='r0', bad_revision='r6', | |
| 731 suspected_revisions=['r3', 'r6']) + | |
| 732 api.override_step_data('test r2.read test spec', api.json.output({ | |
| 733 'Mac10.9 Tests': { | |
| 734 'gtest_tests': [ | |
| 735 { | |
| 736 'test': 'gl_tests', | |
| 737 'swarming': {'can_use_on_swarming_builders': True}, | |
| 738 }, | |
| 739 { | |
| 740 'test': 'browser_tests', | |
| 741 'swarming': {'can_use_on_swarming_builders': True}, | |
| 742 }, | |
| 743 ], | |
| 744 }, | |
| 745 })) + | |
| 746 api.override_step_data('test r3.read test spec', api.json.output({ | |
| 747 'Mac10.9 Tests': { | |
| 748 'gtest_tests': [ | |
| 749 { | |
| 750 'test': 'gl_tests', | |
| 751 'swarming': {'can_use_on_swarming_builders': True}, | |
| 752 }, | |
| 753 { | |
| 754 'test': 'browser_tests', | |
| 755 'swarming': {'can_use_on_swarming_builders': True}, | |
| 756 }, | |
| 757 ], | |
| 758 }, | |
| 759 })) + | |
| 760 api.override_step_data('test r5.read test spec', api.json.output({ | |
| 761 'Mac10.9 Tests': { | |
| 762 'gtest_tests': [ | |
| 763 { | |
| 764 'test': 'gl_tests', | |
| 765 'swarming': {'can_use_on_swarming_builders': True}, | |
| 766 }, | |
| 767 { | |
| 768 'test': 'browser_tests', | |
| 769 'swarming': {'can_use_on_swarming_builders': True}, | |
| 770 }, | |
| 771 ], | |
| 772 }, | |
| 773 })) + | |
| 774 api.override_step_data('test r6.read test spec', api.json.output({ | |
| 775 'Mac10.9 Tests': { | |
| 776 'gtest_tests': [ | |
| 777 { | |
| 778 'test': 'gl_tests', | |
| 779 'swarming': {'can_use_on_swarming_builders': True}, | |
| 780 }, | |
| 781 { | |
| 782 'test': 'browser_tests', | |
| 783 'swarming': {'can_use_on_swarming_builders': True}, | |
| 784 }, | |
| 785 ], | |
| 786 }, | |
| 787 })) + | |
| 788 api.override_step_data( | |
| 789 'git commits in range', | |
| 790 api.raw_io.stream_output( | |
| 791 '\n'.join('r%d' % i for i in reversed(range(1, 7))))) + | |
| 792 api.override_step_data( | |
| 793 'test r5.gl_tests (r5) on Mac-10.9', | |
| 794 simulated_gtest_output(failed_test_names=['Test.gl_One'])) + | |
| 795 api.override_step_data( | |
| 796 'test r5.browser_tests (r5) on Mac-10.9', | |
| 797 simulated_gtest_output(passed_test_names=['Test.browser_One'])) + | |
| 798 api.override_step_data( | |
| 799 'test r6.browser_tests (r6) on Mac-10.9', | |
| 800 simulated_gtest_output(failed_test_names=['Test.browser_One'])) + | |
| 801 api.override_step_data( | |
| 802 'test r2.gl_tests (r2) on Mac-10.9', | |
| 803 simulated_gtest_output(passed_test_names=['Test.gl_One'])) + | |
| 804 api.override_step_data( | |
| 805 'test r3.gl_tests (r3) on Mac-10.9', | |
| 806 simulated_gtest_output(failed_test_names=['Test.gl_One'])) | |
| 807 ) | |
| 808 | |
| 809 yield ( | |
| 810 api.test('findit_tests_multiple_culprits') + | |
| 811 props( | |
| 812 {'gl_tests': ['Test.One', 'Test.Two', 'Test.Three']}, | |
| 813 'mac', 'Mac10.9 Tests', use_analyze=False, | |
| 814 good_revision='r0', bad_revision='r6', | |
| 815 suspected_revisions=['r3', 'r5']) + | |
| 816 api.override_step_data('test r2.read test spec', api.json.output({ | |
| 817 'Mac10.9 Tests': { | |
| 818 'gtest_tests': [ | |
| 819 { | |
| 820 'test': 'gl_tests', | |
| 821 'swarming': {'can_use_on_swarming_builders': True}, | |
| 822 }, | |
| 823 ], | |
| 824 }, | |
| 825 })) + | |
| 826 api.override_step_data('test r3.read test spec', api.json.output({ | |
| 827 'Mac10.9 Tests': { | |
| 828 'gtest_tests': [ | |
| 829 { | |
| 830 'test': 'gl_tests', | |
| 831 'swarming': {'can_use_on_swarming_builders': True}, | |
| 832 }, | |
| 833 ], | |
| 834 }, | |
| 835 })) + | |
| 836 api.override_step_data('test r4.read test spec', api.json.output({ | |
| 837 'Mac10.9 Tests': { | |
| 838 'gtest_tests': [ | |
| 839 { | |
| 840 'test': 'gl_tests', | |
| 841 'swarming': {'can_use_on_swarming_builders': True}, | |
| 842 }, | |
| 843 ], | |
| 844 }, | |
| 845 })) + | |
| 846 api.override_step_data('test r5.read test spec', api.json.output({ | |
| 847 'Mac10.9 Tests': { | |
| 848 'gtest_tests': [ | |
| 849 { | |
| 850 'test': 'gl_tests', | |
| 851 'swarming': {'can_use_on_swarming_builders': True}, | |
| 852 }, | |
| 853 ], | |
| 854 }, | |
| 855 })) + | |
| 856 api.override_step_data('test r6.read test spec', api.json.output({ | |
| 857 'Mac10.9 Tests': { | |
| 858 'gtest_tests': [ | |
| 859 { | |
| 860 'test': 'gl_tests', | |
| 861 'swarming': {'can_use_on_swarming_builders': True}, | |
| 862 }, | |
| 863 ], | |
| 864 }, | |
| 865 })) + | |
| 866 api.override_step_data( | |
| 867 'git commits in range', | |
| 868 api.raw_io.stream_output( | |
| 869 '\n'.join('r%d' % i for i in reversed(range(1, 7))))) + | |
| 870 api.override_step_data( | |
| 871 'test r4.gl_tests (r4) on Mac-10.9', | |
| 872 simulated_gtest_output(passed_test_names=['Test.One', 'Test.Three'], | |
| 873 failed_test_names=['Test.Two'])) + | |
| 874 api.override_step_data( | |
| 875 'test r5.gl_tests (r5) on Mac-10.9', | |
| 876 simulated_gtest_output(passed_test_names=['Test.One'], | |
| 877 failed_test_names=['Test.Three'])) + | |
| 878 api.override_step_data( | |
| 879 'test r6.gl_tests (r6) on Mac-10.9', | |
| 880 simulated_gtest_output(failed_test_names=['Test.One'])) + | |
| 881 api.override_step_data( | |
| 882 'test r2.gl_tests (r2) on Mac-10.9', | |
| 883 simulated_gtest_output(passed_test_names=['Test.Two'])) + | |
| 884 api.override_step_data( | |
| 885 'test r3.gl_tests (r3) on Mac-10.9', | |
| 886 simulated_gtest_output(failed_test_names=['Test.Two'])) | |
| 887 ) | |
| 888 | |
| 889 | |
| 890 yield ( | |
| 891 api.test('findit_consecutive_culprits') + | |
| 892 props( | |
| 893 {'gl_tests': ['Test.One']}, | |
| 894 'mac', 'Mac10.9 Tests', use_analyze=False, | |
| 895 good_revision='r0', bad_revision='r6', | |
| 896 suspected_revisions=['r3', 'r4']) + | |
| 897 api.override_step_data('test r2.read test spec', api.json.output({ | |
| 898 'Mac10.9 Tests': { | |
| 899 'gtest_tests': [ | |
| 900 { | |
| 901 'test': 'gl_tests', | |
| 902 'swarming': {'can_use_on_swarming_builders': True}, | |
| 903 }, | |
| 904 ], | |
| 905 }, | |
| 906 })) + | |
| 907 api.override_step_data('test r3.read test spec', api.json.output({ | |
| 908 'Mac10.9 Tests': { | |
| 909 'gtest_tests': [ | |
| 910 { | |
| 911 'test': 'gl_tests', | |
| 912 'swarming': {'can_use_on_swarming_builders': True}, | |
| 913 }, | |
| 914 ], | |
| 915 }, | |
| 916 })) + | |
| 917 api.override_step_data('test r4.read test spec', api.json.output({ | |
| 918 'Mac10.9 Tests': { | |
| 919 'gtest_tests': [ | |
| 920 { | |
| 921 'test': 'gl_tests', | |
| 922 'swarming': {'can_use_on_swarming_builders': True}, | |
| 923 }, | |
| 924 ], | |
| 925 }, | |
| 926 })) + | |
| 927 api.override_step_data( | |
| 928 'git commits in range', | |
| 929 api.raw_io.stream_output( | |
| 930 '\n'.join('r%d' % i for i in reversed(range(1, 7))))) + | |
| 931 api.override_step_data( | |
| 932 'test r4.gl_tests (r4) on Mac-10.9', | |
| 933 simulated_gtest_output(failed_test_names=['Test.One'])) + | |
| 934 api.override_step_data( | |
| 935 'test r2.gl_tests (r2) on Mac-10.9', | |
| 936 simulated_gtest_output(passed_test_names=['Test.One'])) + | |
| 937 api.override_step_data( | |
| 938 'test r3.gl_tests (r3) on Mac-10.9', | |
| 939 simulated_gtest_output(passed_test_names=['Test.One'])) | |
| 940 ) | |
| OLD | NEW |