Chromium Code Reviews| OLD | NEW |
|---|---|
| 1 # Copyright 2015 The Chromium Authors. All rights reserved. | 1 # Copyright 2015 The Chromium Authors. All rights reserved. |
| 2 # Use of this source code is governed by a BSD-style license that can be | 2 # Use of this source code is governed by a BSD-style license that can be |
| 3 # found in the LICENSE file. | 3 # found in the LICENSE file. |
| 4 | 4 |
| 5 from collections import defaultdict | |
| 5 import json | 6 import json |
| 6 | 7 |
| 7 from recipe_engine.config import Dict | 8 from recipe_engine.config import Dict |
| 9 from recipe_engine.config import List | |
| 8 from recipe_engine.config import Single | 10 from recipe_engine.config import Single |
| 9 from recipe_engine.recipe_api import Property | 11 from recipe_engine.recipe_api import Property |
| 10 | 12 |
| 11 | 13 |
| 12 DEPS = [ | 14 DEPS = [ |
| 13 'adb', | 15 'adb', |
| 14 'depot_tools/bot_update', | 16 'depot_tools/bot_update', |
| 15 'chromium', | 17 'chromium', |
| 16 'chromium_android', | 18 'chromium_android', |
| 17 'chromium_tests', | 19 'chromium_tests', |
| (...skipping 28 matching lines...) Expand all Loading... | |
| 46 'tests': Property( | 48 'tests': Property( |
| 47 kind=Dict(value_type=list), | 49 kind=Dict(value_type=list), |
| 48 help='The failed tests, the test name should be full name, e.g.: {' | 50 help='The failed tests, the test name should be full name, e.g.: {' |
| 49 ' "browser_tests": [' | 51 ' "browser_tests": [' |
| 50 ' "suite.test1", "suite.test2"' | 52 ' "suite.test1", "suite.test2"' |
| 51 ' ]' | 53 ' ]' |
| 52 '}'), | 54 '}'), |
| 53 'use_analyze': Property( | 55 'use_analyze': Property( |
| 54 kind=Single(bool, empty_val=False, required=False), default=True, | 56 kind=Single(bool, empty_val=False, required=False), default=True, |
| 55 help='Use analyze to skip commits that do not affect tests.'), | 57 help='Use analyze to skip commits that do not affect tests.'), |
| 58 'suspected_revisions': Property( | |
| 59 kind=List(basestring), default=[], | |
| 60 help='A list of suspected revisions from heuristic analysis.'), | |
| 56 } | 61 } |
| 57 | 62 |
| 58 | 63 |
| 59 class TestResult(object): | 64 class TestResult(object): |
| 60 SKIPPED = 'skipped' # A commit doesn't impact the test. | 65 SKIPPED = 'skipped' # A commit doesn't impact the test. |
| 61 PASSED = 'passed' # The compile or test passed. | 66 PASSED = 'passed' # The compile or test passed. |
| 62 FAILED = 'failed' # The compile or test failed. | 67 FAILED = 'failed' # The compile or test failed. |
| 63 | 68 |
| 64 | 69 |
| 65 def _compile_and_test_at_revision(api, target_mastername, target_buildername, | 70 def _compile_and_test_at_revision(api, target_mastername, target_buildername, |
| (...skipping 53 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... | |
| 119 bot_config, | 124 bot_config, |
| 120 bot_update_step, | 125 bot_update_step, |
| 121 bot_db, | 126 bot_db, |
| 122 actual_compile_targets, | 127 actual_compile_targets, |
| 123 tests_including_triggered=actual_tests_to_run, | 128 tests_including_triggered=actual_tests_to_run, |
| 124 mb_mastername=target_mastername, | 129 mb_mastername=target_mastername, |
| 125 mb_buildername=target_buildername, | 130 mb_buildername=target_buildername, |
| 126 override_bot_type='builder_tester') | 131 override_bot_type='builder_tester') |
| 127 | 132 |
| 128 # Run the tests. | 133 # Run the tests. |
| 134 failed_tests_dict = defaultdict(list) | |
| 129 with api.chromium_tests.wrap_chromium_tests( | 135 with api.chromium_tests.wrap_chromium_tests( |
| 130 bot_config, actual_tests_to_run): | 136 bot_config, actual_tests_to_run): |
| 131 failed_tests = api.test_utils.run_tests( | 137 failed_tests = api.test_utils.run_tests( |
| 132 api, actual_tests_to_run, | 138 api, actual_tests_to_run, |
| 133 suffix=revision, test_filters=requested_tests) | 139 suffix=revision, test_filters=requested_tests) |
| 134 | 140 |
| 135 # Process failed tests. | 141 # Process failed tests. |
| 136 for failed_test in failed_tests: | 142 for failed_test in failed_tests: |
| 137 valid = failed_test.has_valid_results(api, suffix=revision) | 143 valid = failed_test.has_valid_results(api, suffix=revision) |
| 138 results[failed_test.name] = { | 144 results[failed_test.name] = { |
| 139 'status': TestResult.FAILED, | 145 'status': TestResult.FAILED, |
| 140 'valid': valid, | 146 'valid': valid, |
| 141 } | 147 } |
| 142 if valid: | 148 if valid: |
| 143 results[failed_test.name]['failures'] = list( | 149 test_list = list(failed_test.failures(api, suffix=revision)) |
| 144 failed_test.failures(api, suffix=revision)) | 150 results[failed_test.name]['failures'] = test_list |
| 151 failed_tests_dict[failed_test.name].extend(test_list) | |
| 145 | 152 |
| 146 # Process passed tests. | 153 # Process passed tests. |
| 147 for test in actual_tests_to_run: | 154 for test in actual_tests_to_run: |
| 148 if test not in failed_tests: | 155 if test not in failed_tests: |
| 149 results[test.name] = { | 156 results[test.name] = { |
| 150 'status': TestResult.PASSED, | 157 'status': TestResult.PASSED, |
| 151 'valid': True, | 158 'valid': True, |
| 152 } | 159 } |
| 153 | 160 |
| 154 # Process skipped tests in two scenarios: | 161 # Process skipped tests in two scenarios: |
| 155 # 1. Skipped by "analyze": tests are not affected by the given revision. | 162 # 1. Skipped by "analyze": tests are not affected by the given revision. |
| 156 # 2. Skipped because the requested tests don't exist at the given revision. | 163 # 2. Skipped because the requested tests don't exist at the given revision. |
| 157 for test_name in requested_tests.keys(): | 164 for test_name in requested_tests.keys(): |
| 158 if test_name not in results: | 165 if test_name not in results: |
| 159 results[test_name] = { | 166 results[test_name] = { |
| 160 'status': TestResult.SKIPPED, | 167 'status': TestResult.SKIPPED, |
| 161 'valid': True, | 168 'valid': True, |
| 162 } | 169 } |
| 163 | 170 |
| 164 return results | 171 return results, failed_tests_dict |
| 165 | 172 |
| 166 | 173 |
| 167 def RunSteps(api, target_mastername, target_testername, | 174 def _get_subtracted_test_dict(original_test_dict, failed_tests_dict): |
|
lijeffrey
2016/05/04 19:15:19
Naming this one's tricky, how about 'reduced' instead?
chanli
2016/05/04 22:52:43
Done.
| |
| 168 good_revision, bad_revision, tests, use_analyze): | 175 # Remove tests that are in both dicts from the original test dict. |
| 176 if not failed_tests_dict: | |
| 177 return original_test_dict | |
| 178 subtracted_dict = defaultdict(list) | |
| 179 for step, tests in original_test_dict.iteritems(): | |
| 180 if step in failed_tests_dict: | |
| 181 for test in tests: | |
| 182 if test not in failed_tests_dict[step]: | |
| 183 subtracted_dict[step].append(test) | |
| 184 else: | |
| 185 subtracted_dict[step].extend(tests) | |
| 186 return subtracted_dict | |
| 187 | |
| 188 | |
| 189 def RunSteps(api, target_mastername, target_testername, good_revision, | |
| 190 bad_revision, tests, use_analyze, suspected_revisions): | |
| 169 assert tests, 'No failed tests were specified.' | 191 assert tests, 'No failed tests were specified.' |
| 170 | 192 |
| 171 # Figure out which builder configuration we should match for compile config. | 193 # Figure out which builder configuration we should match for compile config. |
| 172 # Sometimes, the builder itself runs the tests and there is no tester. In | 194 # Sometimes, the builder itself runs the tests and there is no tester. In |
| 173 # such cases, just treat the builder as a "tester". Thus, we default to | 195 # such cases, just treat the builder as a "tester". Thus, we default to |
| 174 # the target tester. | 196 # the target tester. |
| 175 tester_config = api.chromium_tests.builders.get( | 197 tester_config = api.chromium_tests.builders.get( |
| 176 target_mastername).get('builders', {}).get(target_testername) | 198 target_mastername).get('builders', {}).get(target_testername) |
| 177 target_buildername = (tester_config.get('parent_buildername') or | 199 target_buildername = (tester_config.get('parent_buildername') or |
| 178 target_testername) | 200 target_testername) |
| (...skipping 11 matching lines...) Expand all Loading... | |
| 190 api.swarming.set_default_dimension(key, value) | 212 api.swarming.set_default_dimension(key, value) |
| 191 # TODO(stgao): Fix the issue that precommit=False adds the tag 'purpose:CI'. | 213 # TODO(stgao): Fix the issue that precommit=False adds the tag 'purpose:CI'. |
| 192 api.chromium_tests.configure_swarming('chromium', precommit=False) | 214 api.chromium_tests.configure_swarming('chromium', precommit=False) |
| 193 | 215 |
| 194 # Sync to bad revision, and retrieve revisions in the regression range. | 216 # Sync to bad revision, and retrieve revisions in the regression range. |
| 195 api.chromium_tests.prepare_checkout( | 217 api.chromium_tests.prepare_checkout( |
| 196 bot_config, | 218 bot_config, |
| 197 root_solution_revision=bad_revision) | 219 root_solution_revision=bad_revision) |
| 198 revisions_to_check = api.findit.revisions_between(good_revision, bad_revision) | 220 revisions_to_check = api.findit.revisions_between(good_revision, bad_revision) |
| 199 | 221 |
| 222 suspected_revision_index = [ | |
| 223 revisions_to_check.index(r) | |
| 224 for r in set(suspected_revisions) if r in revisions_to_check] | |
| 225 | |
| 226 # Segments revisions_to_check by suspected_revisions. | |
| 227 # Each sub_range will contain following elements: | |
| 228 # 1. Revision before a suspected_revision or None as a placeholder | |
| 229 # when no such revision | |
| 230 # 2. Suspected_revision | |
| 231 # 3. Revisions between a suspected_revision and the revision before next | |
| 232 # suspected_revision, or remaining revisions before all suspect_revisions. | |
| 233 # For example, if revisions_to_check are [r0, r1, ..., r6] and | |
| 234 # suspected_revisions are [r2, r5], sub_ranges will be: | |
| 235 # [[None, r0], [r1, r2, r3], [r4, r5, r6]] | |
| 236 if suspected_revision_index: | |
| 237 sub_ranges = [] | |
| 238 remaining_revisions = revisions_to_check[:] | |
| 239 for index in sorted(suspected_revision_index, reverse=True): | |
| 240 if index > 0: | |
| 241 if index < len(remaining_revisions): | |
| 242 sub_ranges.append(remaining_revisions[index - 1:]) | |
| 243 else: # Consecutive revisions are suspected, merges them in one range. | |
| 244 sub_ranges[-1].insert(0, remaining_revisions[index - 1]) | |
|
stgao
2016/05/04 21:35:28
Can we handle this separately by operating just on
chanli
2016/05/04 22:52:43
I don't really understand what you're suggesting here.
| |
| 245 remaining_revisions = remaining_revisions[:index - 1] | |
| 246 # None is a placeholder for the last known good revision. | |
| 247 sub_ranges.append([None] + remaining_revisions) | |
| 248 else: | |
| 249 # Treats the entire regression range as a single sub-range. | |
| 250 sub_ranges = [[None] + revisions_to_check] | |
| 251 | |
| 200 test_results = {} | 252 test_results = {} |
| 201 try_job_metadata = { | 253 try_job_metadata = { |
| 202 'regression_range_size': len(revisions_to_check) | 254 'regression_range_size': len(revisions_to_check) |
| 203 } | 255 } |
| 204 report = { | 256 report = { |
| 205 'result': test_results, | 257 'result': test_results, |
| 206 'metadata': try_job_metadata | 258 'metadata': try_job_metadata |
| 207 } | 259 } |
| 208 | 260 |
| 209 try: | 261 try: |
| 210 # We compile & run tests from the first revision to the last revision in the | 262 # Tests that haven't found culprits in tested revision(s). |
| 211 # regression range serially instead of a typical bisecting, because jumping | 263 no_culprit_tests = tests |
|
stgao
2016/05/04 21:35:28
naming nit: this name seems misleading -- these te
chanli
2016/05/04 22:52:43
Done.
| |
| 212 # between new and old revisions might affect Goma capacity and build cycle | 264 # Iterates through sub_ranges and find culprits for each failed test. |
| 213 # times. Thus we plan to go with this simple serial approach first so that | 265 for sub_range in sub_ranges: |
|
stgao
2016/05/04 21:35:28
Can we have a comment on why sub-ranges with newer
chanli
2016/05/04 22:52:43
Done.
| |
| 214 # compile would be fast due to incremental compile. | 266 if not no_culprit_tests: # All tests have found culprits. |
| 215 # If this won't work out, we will figure out a better solution for speed of | 267 break |
| 216 # both compile and test. | 268 |
| 217 for current_revision in revisions_to_check: | 269 # The revision right before the suspected revision provided by |
| 218 test_results[current_revision] = _compile_and_test_at_revision( | 270 # the heuristic result. |
| 219 api, target_mastername, target_buildername, target_testername, | 271 potential_green_rev = sub_range[0] |
| 220 current_revision, tests, use_analyze) | 272 following_revisions = sub_range[1:] |
| 221 # TODO(http://crbug.com/566975): check whether culprits for all failed | 273 if potential_green_rev: |
| 222 # tests are found and stop running tests at later revisions if so. | 274 test_results[potential_green_rev], tests_failed_in_potential_green = ( |
| 275 _compile_and_test_at_revision( | |
| 276 api, target_mastername, target_buildername, target_testername, | |
| 277 potential_green_rev, no_culprit_tests, use_analyze)) | |
| 278 else: | |
| 279 tests_failed_in_potential_green = {} | |
| 280 | |
| 281 tests_passed_in_potential_green = _get_subtracted_test_dict( | |
| 282 no_culprit_tests, tests_failed_in_potential_green) | |
| 283 | |
| 284 # Culprits for tests that failed in potential green should be earlier, so | |
| 285 # takes out passed tests and only runs failed ones in following revisions. | |
|
lijeffrey
2016/05/04 19:15:19
nit: 'remove' instead of 'takes out'
chanli
2016/05/04 22:52:43
Done.
| |
| 286 if tests_passed_in_potential_green: | |
| 287 tests_to_run = tests_passed_in_potential_green | |
| 288 for revision in following_revisions: | |
|
stgao
2016/05/04 21:35:28
Maybe add a comment on the general idea how this w
chanli
2016/05/04 22:52:43
Done.
| |
| 289 test_results[revision], tests_failed_in_revision = ( | |
| 290 _compile_and_test_at_revision( | |
| 291 api, target_mastername, target_buildername, target_testername, | |
| 292 revision, tests_to_run, use_analyze)) | |
| 293 | |
| 294 # Takes out tests that passed in potential green and failed in | |
| 295 # following revisions: culprits have been found for them. | |
| 296 no_culprit_tests = _get_subtracted_test_dict( | |
| 297 no_culprit_tests, tests_failed_in_revision) | |
| 298 | |
| 299 # Only runs tests that have not found culprits in later revisions. | |
| 300 tests_to_run = _get_subtracted_test_dict( | |
| 301 tests_to_run, tests_failed_in_revision) | |
| 302 | |
| 303 if not tests_to_run: | |
| 304 break | |
| 305 | |
| 223 finally: | 306 finally: |
| 224 # Give the full report including test results and metadata. | 307 # Give the full report including test results and metadata. |
| 225 step_result = api.python.succeeding_step( | 308 step_result = api.python.succeeding_step( |
| 226 'report', [json.dumps(report, indent=2)], as_log='report') | 309 'report', [json.dumps(report, indent=2)], as_log='report') |
| 227 | 310 |
| 228 # Set the report as a build property too, so that it will be reported back | 311 # Set the report as a build property too, so that it will be reported back |
| 229 # to Buildbucket and Findit will pull from there instead of buildbot master. | 312 # to Buildbucket and Findit will pull from there instead of buildbot master. |
| 230 step_result.presentation.properties['report'] = report | 313 step_result.presentation.properties['report'] = report |
| 231 | 314 |
| 232 return report | 315 return report |
| 233 | 316 |
| 234 | 317 |
| 235 def GenTests(api): | 318 def GenTests(api): |
| 236 def props(tests, platform_name, tester_name, use_analyze=False): | 319 def props( |
| 320 tests, platform_name, tester_name, use_analyze=False, good_revision=None, | |
| 321 bad_revision=None, suspected_revisions=None): | |
| 237 properties = { | 322 properties = { |
| 238 'mastername': 'tryserver.chromium.%s' % platform_name, | 323 'mastername': 'tryserver.chromium.%s' % platform_name, |
| 239 'buildername': '%s_chromium_variable' % platform_name, | 324 'buildername': '%s_chromium_variable' % platform_name, |
| 240 'slavename': 'build1-a1', | 325 'slavename': 'build1-a1', |
| 241 'buildnumber': 1, | 326 'buildnumber': 1, |
| 242 'target_mastername': 'chromium.%s' % platform_name, | 327 'target_mastername': 'chromium.%s' % platform_name, |
| 243 'target_testername': tester_name, | 328 'target_testername': tester_name, |
| 244 'good_revision': 'r0', | 329 'good_revision': good_revision or 'r0', |
| 245 'bad_revision': 'r1', | 330 'bad_revision': bad_revision or 'r1', |
| 246 'tests': tests, | 331 'tests': tests, |
| 247 'use_analyze': use_analyze, | 332 'use_analyze': use_analyze, |
| 248 } | 333 } |
| 334 if suspected_revisions: | |
| 335 properties['suspected_revisions'] = suspected_revisions | |
| 249 return api.properties(**properties) + api.platform.name(platform_name) | 336 return api.properties(**properties) + api.platform.name(platform_name) |
| 250 | 337 |
| 251 def simulated_gtest_output(failed_test_names=(), passed_test_names=()): | 338 def simulated_gtest_output(failed_test_names=(), passed_test_names=()): |
| 252 cur_iteration_data = {} | 339 cur_iteration_data = {} |
| 253 for test_name in failed_test_names: | 340 for test_name in failed_test_names: |
| 254 cur_iteration_data[test_name] = [{ | 341 cur_iteration_data[test_name] = [{ |
| 255 'elapsed_time_ms': 0, | 342 'elapsed_time_ms': 0, |
| 256 'output_snippet': '', | 343 'output_snippet': '', |
| 257 'status': 'FAILURE', | 344 'status': 'FAILURE', |
| 258 }] | 345 }] |
| (...skipping 201 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... | |
| 460 'swarming': {'can_use_on_swarming_builders': True}, | 547 'swarming': {'can_use_on_swarming_builders': True}, |
| 461 }, | 548 }, |
| 462 ], | 549 ], |
| 463 }, | 550 }, |
| 464 })) + | 551 })) + |
| 465 api.override_step_data( | 552 api.override_step_data( |
| 466 'test r1.gl_tests (r1) on Mac-10.9', | 553 'test r1.gl_tests (r1) on Mac-10.9', |
| 467 simulated_gtest_output(passed_test_names=['Test.One']) | 554 simulated_gtest_output(passed_test_names=['Test.One']) |
| 468 ) | 555 ) |
| 469 ) | 556 ) |
| 557 | |
| 558 yield ( | |
| 559 api.test('findit_culprit_in_last_sub_range') + | |
| 560 props( | |
| 561 {'gl_tests': ['Test.One']}, 'mac', 'Mac10.9 Tests', use_analyze=False, | |
| 562 good_revision='r0', bad_revision='r6', suspected_revisions=['r3']) + | |
| 563 api.override_step_data('test r2.read test spec', api.json.output({ | |
| 564 'Mac10.9 Tests': { | |
| 565 'gtest_tests': [ | |
| 566 { | |
| 567 'test': 'gl_tests', | |
| 568 'swarming': {'can_use_on_swarming_builders': True}, | |
| 569 }, | |
| 570 ], | |
| 571 }, | |
| 572 })) + | |
| 573 api.override_step_data('test r3.read test spec', api.json.output({ | |
| 574 'Mac10.9 Tests': { | |
| 575 'gtest_tests': [ | |
| 576 { | |
| 577 'test': 'gl_tests', | |
| 578 'swarming': {'can_use_on_swarming_builders': True}, | |
| 579 }, | |
| 580 ], | |
| 581 }, | |
| 582 })) + | |
| 583 api.override_step_data( | |
| 584 'git commits in range', | |
| 585 api.raw_io.stream_output( | |
| 586 '\n'.join('r%d' % i for i in reversed(range(1, 7))))) + | |
| 587 api.override_step_data( | |
| 588 'test r2.gl_tests (r2) on Mac-10.9', | |
| 589 simulated_gtest_output(passed_test_names=['Test.One'])) + | |
| 590 api.override_step_data( | |
| 591 'test r3.gl_tests (r3) on Mac-10.9', | |
| 592 simulated_gtest_output(failed_test_names=['Test.One'])) | |
| 593 ) | |
| 594 | |
| 595 yield ( | |
| 596 api.test('findit_culprit_in_middle_sub_range') + | |
| 597 props( | |
| 598 {'gl_tests': ['Test.One']}, 'mac', 'Mac10.9 Tests', use_analyze=False, | |
| 599 good_revision='r0', bad_revision='r6', | |
| 600 suspected_revisions=['r3', 'r6']) + | |
| 601 api.override_step_data('test r2.read test spec', api.json.output({ | |
| 602 'Mac10.9 Tests': { | |
| 603 'gtest_tests': [ | |
| 604 { | |
| 605 'test': 'gl_tests', | |
| 606 'swarming': {'can_use_on_swarming_builders': True}, | |
| 607 }, | |
| 608 ], | |
| 609 }, | |
| 610 })) + | |
| 611 api.override_step_data('test r3.read test spec', api.json.output({ | |
| 612 'Mac10.9 Tests': { | |
| 613 'gtest_tests': [ | |
| 614 { | |
| 615 'test': 'gl_tests', | |
| 616 'swarming': {'can_use_on_swarming_builders': True}, | |
| 617 }, | |
| 618 ], | |
| 619 }, | |
| 620 })) + | |
| 621 api.override_step_data('test r5.read test spec', api.json.output({ | |
| 622 'Mac10.9 Tests': { | |
| 623 'gtest_tests': [ | |
| 624 { | |
| 625 'test': 'gl_tests', | |
| 626 'swarming': {'can_use_on_swarming_builders': True}, | |
| 627 }, | |
| 628 ], | |
| 629 }, | |
| 630 })) + | |
| 631 api.override_step_data('test r6.read test spec', api.json.output({ | |
| 632 'Mac10.9 Tests': { | |
| 633 'gtest_tests': [ | |
| 634 { | |
| 635 'test': 'gl_tests', | |
| 636 'swarming': {'can_use_on_swarming_builders': True}, | |
| 637 }, | |
| 638 ], | |
| 639 }, | |
| 640 })) + | |
| 641 api.override_step_data( | |
| 642 'git commits in range', | |
| 643 api.raw_io.stream_output( | |
| 644 '\n'.join('r%d' % i for i in reversed(range(1, 7))))) + | |
| 645 api.override_step_data( | |
| 646 'test r2.gl_tests (r2) on Mac-10.9', | |
| 647 simulated_gtest_output(passed_test_names=['Test.One'])) + | |
| 648 api.override_step_data( | |
| 649 'test r3.gl_tests (r3) on Mac-10.9', | |
| 650 simulated_gtest_output(failed_test_names=['Test.One'])) + | |
| 651 api.override_step_data( | |
| 652 'test r5.gl_tests (r5) on Mac-10.9', | |
| 653 simulated_gtest_output(passed_test_names=['Test.One'])) + | |
| 654 api.override_step_data( | |
| 655 'test r6.gl_tests (r6) on Mac-10.9', | |
| 656 simulated_gtest_output(passed_test_names=['Test.One'])) | |
| 657 ) | |
| 658 | |
| 659 yield ( | |
| 660 api.test('findit_culprit_in_first_sub_range') + | |
| 661 props( | |
| 662 {'gl_tests': ['Test.One']}, 'mac', 'Mac10.9 Tests', use_analyze=False, | |
| 663 good_revision='r0', bad_revision='r6', | |
| 664 suspected_revisions=['r6']) + | |
| 665 api.override_step_data('test r1.read test spec', api.json.output({ | |
| 666 'Mac10.9 Tests': { | |
| 667 'gtest_tests': [ | |
| 668 { | |
| 669 'test': 'gl_tests', | |
| 670 'swarming': {'can_use_on_swarming_builders': True}, | |
| 671 }, | |
| 672 ], | |
| 673 }, | |
| 674 })) + | |
| 675 api.override_step_data('test r5.read test spec', api.json.output({ | |
| 676 'Mac10.9 Tests': { | |
| 677 'gtest_tests': [ | |
| 678 { | |
| 679 'test': 'gl_tests', | |
| 680 'swarming': {'can_use_on_swarming_builders': True}, | |
| 681 }, | |
| 682 ], | |
| 683 }, | |
| 684 })) + | |
| 685 api.override_step_data('test r6.read test spec', api.json.output({ | |
| 686 'Mac10.9 Tests': { | |
| 687 'gtest_tests': [ | |
| 688 { | |
| 689 'test': 'gl_tests', | |
| 690 'swarming': {'can_use_on_swarming_builders': True}, | |
| 691 }, | |
| 692 ], | |
| 693 }, | |
| 694 })) + | |
| 695 api.override_step_data( | |
| 696 'git commits in range', | |
| 697 api.raw_io.stream_output( | |
| 698 '\n'.join('r%d' % i for i in reversed(range(1, 7))))) + | |
| 699 api.override_step_data( | |
| 700 'test r1.gl_tests (r1) on Mac-10.9', | |
| 701 simulated_gtest_output(failed_test_names=['Test.One'])) + | |
| 702 api.override_step_data( | |
| 703 'test r5.gl_tests (r5) on Mac-10.9', | |
| 704 simulated_gtest_output(passed_test_names=['Test.One'])) + | |
| 705 api.override_step_data( | |
| 706 'test r6.gl_tests (r6) on Mac-10.9', | |
| 707 simulated_gtest_output(passed_test_names=['Test.One'])) | |
| 708 ) | |
| 709 | |
| 710 yield ( | |
| 711 api.test('findit_steps_multiple_culprits') + | |
| 712 props( | |
| 713 {'gl_tests': ['Test.gl_One'], 'browser_tests': ['Test.browser_One']}, | |
| 714 'mac', 'Mac10.9 Tests', use_analyze=False, | |
| 715 good_revision='r0', bad_revision='r6', | |
| 716 suspected_revisions=['r3','r6']) + | |
| 717 api.override_step_data('test r2.read test spec', api.json.output({ | |
| 718 'Mac10.9 Tests': { | |
| 719 'gtest_tests': [ | |
| 720 { | |
| 721 'test': 'gl_tests', | |
| 722 'swarming': {'can_use_on_swarming_builders': True}, | |
| 723 }, | |
| 724 { | |
| 725 'test': 'browser_tests', | |
| 726 'swarming': {'can_use_on_swarming_builders': True}, | |
| 727 }, | |
| 728 ], | |
| 729 }, | |
| 730 })) + | |
| 731 api.override_step_data('test r3.read test spec', api.json.output({ | |
| 732 'Mac10.9 Tests': { | |
| 733 'gtest_tests': [ | |
| 734 { | |
| 735 'test': 'gl_tests', | |
| 736 'swarming': {'can_use_on_swarming_builders': True}, | |
| 737 }, | |
| 738 { | |
| 739 'test': 'browser_tests', | |
| 740 'swarming': {'can_use_on_swarming_builders': True}, | |
| 741 }, | |
| 742 ], | |
| 743 }, | |
| 744 })) + | |
| 745 api.override_step_data('test r5.read test spec', api.json.output({ | |
| 746 'Mac10.9 Tests': { | |
| 747 'gtest_tests': [ | |
| 748 { | |
| 749 'test': 'gl_tests', | |
| 750 'swarming': {'can_use_on_swarming_builders': True}, | |
| 751 }, | |
| 752 { | |
| 753 'test': 'browser_tests', | |
| 754 'swarming': {'can_use_on_swarming_builders': True}, | |
| 755 }, | |
| 756 ], | |
| 757 }, | |
| 758 })) + | |
| 759 api.override_step_data('test r6.read test spec', api.json.output({ | |
| 760 'Mac10.9 Tests': { | |
| 761 'gtest_tests': [ | |
| 762 { | |
| 763 'test': 'gl_tests', | |
| 764 'swarming': {'can_use_on_swarming_builders': True}, | |
| 765 }, | |
| 766 { | |
| 767 'test': 'browser_tests', | |
| 768 'swarming': {'can_use_on_swarming_builders': True}, | |
| 769 }, | |
| 770 ], | |
| 771 }, | |
| 772 })) + | |
| 773 api.override_step_data( | |
| 774 'git commits in range', | |
| 775 api.raw_io.stream_output( | |
| 776 '\n'.join('r%d' % i for i in reversed(range(1, 7))))) + | |
| 777 api.override_step_data( | |
| 778 'test r5.gl_tests (r5) on Mac-10.9', | |
| 779 simulated_gtest_output(failed_test_names=['Test.gl_One'])) + | |
| 780 api.override_step_data( | |
| 781 'test r5.browser_tests (r5) on Mac-10.9', | |
| 782 simulated_gtest_output(passed_test_names=['Test.browser_One'])) + | |
| 783 api.override_step_data( | |
| 784 'test r6.browser_tests (r6) on Mac-10.9', | |
| 785 simulated_gtest_output(failed_test_names=['Test.browser_One']))+ | |
| 786 api.override_step_data( | |
| 787 'test r2.gl_tests (r2) on Mac-10.9', | |
| 788 simulated_gtest_output(passed_test_names=['Test.gl_One'])) + | |
| 789 api.override_step_data( | |
| 790 'test r3.gl_tests (r3) on Mac-10.9', | |
| 791 simulated_gtest_output(failed_test_names=['Test.gl_One'])) | |
| 792 ) | |
| 793 | |
| 794 yield ( | |
| 795 api.test('findit_tests_multiple_culprits') + | |
| 796 props( | |
| 797 {'gl_tests': ['Test.One', 'Test.Two', 'Test.Three']}, | |
| 798 'mac', 'Mac10.9 Tests', use_analyze=False, | |
| 799 good_revision='r0', bad_revision='r6', | |
| 800 suspected_revisions=['r3', 'r5']) + | |
| 801 api.override_step_data('test r2.read test spec', api.json.output({ | |
| 802 'Mac10.9 Tests': { | |
| 803 'gtest_tests': [ | |
| 804 { | |
| 805 'test': 'gl_tests', | |
| 806 'swarming': {'can_use_on_swarming_builders': True}, | |
| 807 }, | |
| 808 ], | |
| 809 }, | |
| 810 })) + | |
| 811 api.override_step_data('test r3.read test spec', api.json.output({ | |
| 812 'Mac10.9 Tests': { | |
| 813 'gtest_tests': [ | |
| 814 { | |
| 815 'test': 'gl_tests', | |
| 816 'swarming': {'can_use_on_swarming_builders': True}, | |
| 817 }, | |
| 818 ], | |
| 819 }, | |
| 820 })) + | |
| 821 api.override_step_data('test r4.read test spec', api.json.output({ | |
| 822 'Mac10.9 Tests': { | |
| 823 'gtest_tests': [ | |
| 824 { | |
| 825 'test': 'gl_tests', | |
| 826 'swarming': {'can_use_on_swarming_builders': True}, | |
| 827 }, | |
| 828 ], | |
| 829 }, | |
| 830 })) + | |
| 831 api.override_step_data('test r5.read test spec', api.json.output({ | |
| 832 'Mac10.9 Tests': { | |
| 833 'gtest_tests': [ | |
| 834 { | |
| 835 'test': 'gl_tests', | |
| 836 'swarming': {'can_use_on_swarming_builders': True}, | |
| 837 }, | |
| 838 ], | |
| 839 }, | |
| 840 })) + | |
| 841 api.override_step_data('test r6.read test spec', api.json.output({ | |
| 842 'Mac10.9 Tests': { | |
| 843 'gtest_tests': [ | |
| 844 { | |
| 845 'test': 'gl_tests', | |
| 846 'swarming': {'can_use_on_swarming_builders': True}, | |
| 847 }, | |
| 848 ], | |
| 849 }, | |
| 850 })) + | |
| 851 api.override_step_data( | |
| 852 'git commits in range', | |
| 853 api.raw_io.stream_output( | |
| 854 '\n'.join('r%d' % i for i in reversed(range(1, 7))))) + | |
| 855 api.override_step_data( | |
| 856 'test r4.gl_tests (r4) on Mac-10.9', | |
| 857 simulated_gtest_output(passed_test_names=['Test.One', 'Test.Three'], | |
| 858 failed_test_names=['Test.Two'])) + | |
| 859 api.override_step_data( | |
| 860 'test r5.gl_tests (r5) on Mac-10.9', | |
| 861 simulated_gtest_output(passed_test_names=['Test.One'], | |
| 862 failed_test_names=['Test.Three'])) + | |
| 863 api.override_step_data( | |
| 864 'test r6.gl_tests (r6) on Mac-10.9', | |
| 865 simulated_gtest_output(failed_test_names=['Test.One']))+ | |
| 866 api.override_step_data( | |
| 867 'test r2.gl_tests (r2) on Mac-10.9', | |
| 868 simulated_gtest_output(passed_test_names=['Test.Two'])) + | |
| 869 api.override_step_data( | |
| 870 'test r3.gl_tests (r3) on Mac-10.9', | |
| 871 simulated_gtest_output(failed_test_names=['Test.Two'])) | |
| 872 ) | |
| OLD | NEW |