| OLD | NEW |
| 1 # Copyright 2016 The Chromium Authors. All rights reserved. | 1 # Copyright 2016 The Chromium Authors. All rights reserved. |
| 2 # Use of this source code is governed by a BSD-style license that can be | 2 # Use of this source code is governed by a BSD-style license that can be |
| 3 # found in the LICENSE file. | 3 # found in the LICENSE file. |
| 4 | 4 |
| 5 from collections import defaultdict | 5 from collections import defaultdict |
| 6 import copy | 6 import copy |
| 7 | 7 |
| 8 from handlers import result_status | 8 from handlers import result_status |
| 9 from model import analysis_status | 9 from model import analysis_status |
| 10 from model.wf_analysis import WfAnalysis | 10 from model.wf_analysis import WfAnalysis |
| (...skipping 58 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... |
| 69 'all_tests': ['test1'] | 69 'all_tests': ['test1'] |
| 70 } | 70 } |
| 71 } | 71 } |
| 72 } | 72 } |
| 73 } | 73 } |
| 74 """ | 74 """ |
| 75 | 75 |
| 76 tasks_info = defaultdict(lambda: defaultdict(lambda: defaultdict(dict))) | 76 tasks_info = defaultdict(lambda: defaultdict(lambda: defaultdict(dict))) |
| 77 | 77 |
| 78 swarming_server = waterfall_config.GetSwarmingSettings()['server_host'] | 78 swarming_server = waterfall_config.GetSwarmingSettings()['server_host'] |
| 79 |
| 79 for step_name, failure in failure_result_map.iteritems(): | 80 for step_name, failure in failure_result_map.iteritems(): |
| 80 step_tasks_info = tasks_info[step_name]['swarming_tasks'] | 81 step_tasks_info = tasks_info[step_name]['swarming_tasks'] |
| 82 |
| 81 if isinstance(failure, dict): | 83 if isinstance(failure, dict): |
| 82 # Only swarming test failures have swarming re-runs. | 84 # Only swarming test failures have swarming re-runs. |
| 83 swarming_task_keys = set(failure.values()) | 85 swarming_task_keys = set(failure.values()) |
| 84 | 86 |
| 85 for key in swarming_task_keys: | 87 for key in swarming_task_keys: |
| 86 task_dict = step_tasks_info[key] | 88 task_dict = step_tasks_info[key] |
| 87 referred_build_keys = key.split('/') | 89 referred_build_keys = key.split('/') |
| 88 task = WfSwarmingTask.Get(*referred_build_keys, step_name=step_name) | 90 task = WfSwarmingTask.Get(*referred_build_keys, step_name=step_name) |
| 89 all_tests = _GetAllTestsForASwarmingTask(key, failure) | 91 all_tests = _GetAllTestsForASwarmingTask(key, failure) |
| 90 task_dict['all_tests'] = all_tests | 92 task_dict['all_tests'] = all_tests |
| (...skipping 92 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... |
| 183 revision = culprit['revision'] | 185 revision = culprit['revision'] |
| 184 if organized_culprits.get(revision): | 186 if organized_culprits.get(revision): |
| 185 organized_culprits[revision]['failed_tests'].append(test_name) | 187 organized_culprits[revision]['failed_tests'].append(test_name) |
| 186 else: | 188 else: |
| 187 organized_culprits[revision] = culprit | 189 organized_culprits[revision] = culprit |
| 188 organized_culprits[revision]['failed_tests'] = [test_name] | 190 organized_culprits[revision]['failed_tests'] = [test_name] |
| 189 | 191 |
| 190 return organized_culprits | 192 return organized_culprits |
| 191 | 193 |
| 192 | 194 |
| 193 def _GetCulpritInfoForTryJobResultForTest(try_job_key, culprits_info): | 195 def _GetCulpritInfoForTryJobResultForTest(try_job_key, culprits_info, |
| 196 show_debug_info): |
| 194 referred_build_keys = try_job_key.split('/') | 197 referred_build_keys = try_job_key.split('/') |
| 195 try_job = WfTryJob.Get(*referred_build_keys) | 198 try_job = WfTryJob.Get(*referred_build_keys) |
| 199 |
| 196 if not try_job or try_job.compile_results: | 200 if not try_job or try_job.compile_results: |
| 197 return | 201 return |
| 198 | 202 |
| 199 try_job_result = try_job.test_results[-1] if try_job.test_results else None | 203 try_job_result = try_job.test_results[-1] if try_job.test_results else None |
| 200 | 204 |
| 201 for step_try_jobs in culprits_info.values(): | 205 for step_try_jobs in culprits_info.values(): |
| 202 # If try job found different culprits for each test, split tests by culprit. | 206 # If try job found different culprits for each test, split tests by culprit. |
| 203 additional_tests_culprit_info = [] | 207 additional_tests_culprit_info = [] |
| 208 |
| 204 for try_job_info in step_try_jobs['try_jobs']: | 209 for try_job_info in step_try_jobs['try_jobs']: |
| 205 if (try_job_key != try_job_info['try_job_key'] | 210 if (try_job_key != try_job_info['try_job_key'] |
| 206 or try_job_info.get('status')): | 211 or (try_job_info.get('status') and not show_debug_info)): |
| 207 # Conditions that try_job_info has status are: | 212 # Conditions that try_job_info has status are: |
| 208 # If there is no swarming task, there won't be try job; | 213 # If there is no swarming task, there won't be try job; |
| 209 # If the swarming task is not completed yet, there won't be try job yet; | 214 # If the swarming task is not completed yet, there won't be try job yet; |
| 210 # If there are flaky tests found, those tests will be marked as flaky, | 215 # If there are flaky tests found, those tests will be marked as flaky, |
| 211 # and no try job for them will be triggered. | 216 # and no try job for them will be triggered. |
| | 217 # If a try job was force-triggered by an admin. |
| 212 continue | 218 continue |
| 213 | 219 |
| 214 try_job_info['status'] = try_job.status | 220 try_job_info['status'] = try_job.status |
| 215 if try_job_result: | 221 if try_job_result: |
| 216 # Needs to use ref_name to match step_name in try job. | 222 # Needs to use ref_name to match step_name in try job. |
| 217 ref_name = try_job_info['ref_name'] | 223 ref_name = try_job_info['ref_name'] |
| 218 # Saves try job information. | 224 # Saves try job information. |
| 219 if try_job_result.get('url'): # pragma: no cover | 225 if try_job_result.get('url'): # pragma: no cover |
| 220 try_job_info['try_job_url'] = try_job_result['url'] | 226 try_job_info['try_job_url'] = try_job_result['url'] |
| 221 try_job_info['try_job_build_number'] = ( | 227 try_job_info['try_job_build_number'] = ( |
| 222 _GetTryJobBuildNumber(try_job_result['url'])) | 228 _GetTryJobBuildNumber(try_job_result['url'])) |
| 223 | 229 |
| 224 if (try_job_result.get('culprit') and | 230 if (try_job_result.get('culprit') and |
| 225 try_job_result['culprit'].get(ref_name)): | 231 try_job_result['culprit'].get(ref_name)): |
| 226 # Saves try job culprits information. | 232 # Saves try job culprits information. |
| 227 | 233 |
| 228 # Uses culprits to group tests. | 234 # Uses culprits to group tests. |
| 229 culprit_tests_map = _OrganizeTryJobResultByCulprits( | 235 culprit_tests_map = _OrganizeTryJobResultByCulprits( |
| 230 try_job_result['culprit'][ref_name]) | 236 try_job_result['culprit'][ref_name]) |
| 231 ungrouped_tests = try_job_info['tests'] | 237 |
| 238 ungrouped_tests = try_job_info.get('tests', []) |
| 232 list_of_culprits = [] | 239 list_of_culprits = [] |
| 233 for culprit_info in culprit_tests_map.values(): | 240 for culprit_info in culprit_tests_map.values(): |
| 234 failed_tests = culprit_info['failed_tests'] | 241 failed_tests = culprit_info['failed_tests'] |
| 235 list_of_culprits.append(culprit_info) | 242 list_of_culprits.append(culprit_info) |
| 236 # Gets tests that haven't been grouped. | 243 # Gets tests that haven't been grouped. |
| 237 ungrouped_tests = list( | 244 ungrouped_tests = list( |
| 238 set(ungrouped_tests) ^ set(failed_tests)) | 245 set(ungrouped_tests) ^ set(failed_tests)) |
| 239 if not ungrouped_tests: | 246 if not ungrouped_tests: |
| 240 # All tests have been grouped. | 247 # All tests have been grouped. |
| 241 break | 248 break |
| (...skipping 28 matching lines...) Expand all Loading... |
| 270 'review_url': iterate_culprit.get( | 277 'review_url': iterate_culprit.get( |
| 271 'url', iterate_culprit.get('review_url', None)) | 278 'url', iterate_culprit.get('review_url', None)) |
| 272 } | 279 } |
| 273 tmp_try_job_info['tests'] = iterate_culprit['failed_tests'] | 280 tmp_try_job_info['tests'] = iterate_culprit['failed_tests'] |
| 274 additional_tests_culprit_info.append(tmp_try_job_info) | 281 additional_tests_culprit_info.append(tmp_try_job_info) |
| 275 | 282 |
| 276 if additional_tests_culprit_info: | 283 if additional_tests_culprit_info: |
| 277 step_try_jobs['try_jobs'].extend(additional_tests_culprit_info) | 284 step_try_jobs['try_jobs'].extend(additional_tests_culprit_info) |
| 278 | 285 |
| 279 | 286 |
| 280 def _UpdateTryJobInfoBasedOnSwarming(step_tasks_info, try_jobs): | 287 def _UpdateTryJobInfoBasedOnSwarming(step_tasks_info, try_jobs, |
| 288 show_debug_info=False, |
| 289 step_name=None): |
| 281 """ | 290 """ |
| 282 Args: | 291 Args: |
| 283 step_tasks_info (dict): A dict of swarming task info for this step. | 292 step_tasks_info (dict): A dict of swarming task info for this step. |
| 284 It is the result from _GenerateSwarmingTasksData. | 293 It is the result from _GenerateSwarmingTasksData. |
| 285 try_jobs (list): A list to save try job data for the step, format as below: | 294 try_jobs (list): A list to save try job data for the step, format as below: |
| 286 [ | 295 [ |
| 287 { | 296 { |
| 288 'try_job_key': 'm/b/120' | 297 'try_job_key': 'm/b/120' |
| 289 }, | 298 }, |
| 290 { | 299 { |
| 291 'try_job_key': 'm/b/121' | 300 'try_job_key': 'm/b/121' |
| 292 }, | 301 }, |
| 293 ... | 302 ... |
| 294 ] | 303 ] |
| 295 """ | 304 """ |
| 296 additional_flakiness_list = [] | 305 additional_flakiness_list = [] |
| 297 for try_job in try_jobs: | 306 for try_job in try_jobs: |
| 298 try_job_key = try_job['try_job_key'] | 307 try_job_key = try_job['try_job_key'] |
| 299 task = step_tasks_info.get('swarming_tasks', {}).get(try_job_key) | 308 task = step_tasks_info.get('swarming_tasks', {}).get(try_job_key) |
| 300 | 309 |
| 301 if task['task_info']['status'] != analysis_status.COMPLETED: | 310 if (task['task_info']['status'] != analysis_status.COMPLETED and |
| 311 not show_debug_info): |
| 302 # There is something wrong with swarming task or it's not done yet, | 312 # There is something wrong with swarming task or it's not done yet, |
| 303 # no try job yet or ever. | 313 # no try job yet or ever. |
| 304 try_job['status'] = result_status.NO_TRY_JOB_REASON_MAP[ | 314 try_job['status'] = result_status.NO_TRY_JOB_REASON_MAP[ |
| 305 task['task_info']['status']] | 315 task['task_info']['status']] |
| 306 try_job['tests'] = task.get('all_tests', []) | 316 try_job['tests'] = task.get('all_tests', []) |
| 307 else: | 317 else: |
| 308 # Swarming task is completed, group tests according to task result. | 318 # Swarming task is completed or a manual try job rerun was triggered. |
| 309 try_job['ref_name'] = task['ref_name'] | 319 # Group tests according to task result. |
| 320 if task.get('ref_name'): |
| 321 try_job['ref_name'] = task['ref_name'] |
| 322 else: |
| 323 # A try job was forced for a non-swarming step. |
| 324 if show_debug_info: # pragma: no branch |
| 325 try_job['ref_name'] = step_name.split()[0] |
| 326 |
| 310 if task.get('reliable_tests'): | 327 if task.get('reliable_tests'): |
| 311 try_job['tests'] = task['reliable_tests'] | 328 try_job['tests'] = task['reliable_tests'] |
| 312 if task.get('flaky_tests'): | 329 if task.get('flaky_tests'): |
| 313 # Split this try job into two groups: flaky group and reliable group. | 330 # Split this try job into two groups: flaky group and reliable group. |
| 314 flaky_try_job = copy.deepcopy(try_job) | 331 flaky_try_job = copy.deepcopy(try_job) |
| 315 flaky_try_job['status'] = result_status.FLAKY | 332 flaky_try_job['status'] = result_status.FLAKY |
| 316 flaky_try_job['tests'] = task['flaky_tests'] | 333 flaky_try_job['tests'] = task['flaky_tests'] |
| 317 flaky_try_job['task_id'] = task['task_info']['task_id'] | 334 flaky_try_job['task_id'] = task['task_info']['task_id'] |
| 318 flaky_try_job['task_url'] = task['task_info']['task_url'] | 335 flaky_try_job['task_url'] = task['task_info']['task_url'] |
| 319 additional_flakiness_list.append(flaky_try_job) | 336 additional_flakiness_list.append(flaky_try_job) |
| 320 elif task.get('flaky_tests'): # pragma: no cover | 337 elif task.get('flaky_tests'): # pragma: no cover |
| 321 # All Flaky. | 338 # All Flaky. |
| 322 try_job['status'] = result_status.FLAKY | 339 try_job['status'] = result_status.FLAKY |
| 323 try_job['tests'] = task['flaky_tests'] | 340 try_job['tests'] = task['flaky_tests'] |
| 324 | 341 |
| 325 if task['task_info'].get('task_id'): | 342 if task['task_info'].get('task_id'): |
| 326 try_job['task_id'] = task['task_info']['task_id'] | 343 try_job['task_id'] = task['task_info']['task_id'] |
| 327 try_job['task_url'] = task['task_info']['task_url'] | 344 try_job['task_url'] = task['task_info']['task_url'] |
| 328 | 345 |
| 329 try_jobs.extend(additional_flakiness_list) | 346 try_jobs.extend(additional_flakiness_list) |
| 330 | 347 |
| 331 | 348 |
| 332 def _GetAllTryJobResultsForTest(failure_result_map, tasks_info): | 349 def _GetAllTryJobResultsForTest(failure_result_map, tasks_info, |
| 350 show_debug_info=False): |
| 333 culprits_info = defaultdict(lambda: defaultdict(list)) | 351 culprits_info = defaultdict(lambda: defaultdict(list)) |
| 334 if not tasks_info: | 352 if not tasks_info: |
| 335 return culprits_info | 353 return culprits_info |
| 336 | 354 |
| 337 try_job_keys = set() | 355 try_job_keys = set() |
| 356 |
| 338 for step_name, step_failure_result_map in failure_result_map.iteritems(): | 357 for step_name, step_failure_result_map in failure_result_map.iteritems(): |
| 339 try_jobs = culprits_info[step_name]['try_jobs'] | 358 try_jobs = culprits_info[step_name]['try_jobs'] |
| 340 | |
| 341 if isinstance(step_failure_result_map, dict): | 359 if isinstance(step_failure_result_map, dict): |
| 342 step_try_job_keys = set() | 360 step_try_job_keys = set() |
| 343 for try_job_key in step_failure_result_map.values(): | 361 for try_job_key in step_failure_result_map.values(): |
| 344 if try_job_key not in step_try_job_keys: | 362 if try_job_key not in step_try_job_keys: |
| 345 try_job_dict = { | 363 try_job_dict = { |
| 346 'try_job_key': try_job_key | 364 'try_job_key': try_job_key |
| 347 } | 365 } |
| 348 try_jobs.append(try_job_dict) | 366 try_jobs.append(try_job_dict) |
| 349 step_try_job_keys.add(try_job_key) | 367 step_try_job_keys.add(try_job_key) |
| 350 try_job_keys.update(step_try_job_keys) | 368 try_job_keys.update(step_try_job_keys) |
| 351 else: | 369 else: |
| 352 # Try job should only be triggered for swarming tests, because we cannot | 370 # By default Findit should only trigger try jobs for swarming tests, |
| 353 # identify flaky tests for non-swarming tests. | 371 # because we cannot identify flaky tests for non-swarming tests. |
| 354 try_job_dict = { | 372 try_job_dict = { |
| 355 'try_job_key': step_failure_result_map | 373 'try_job_key': step_failure_result_map |
| 356 } | 374 } |
| 357 try_jobs.append(try_job_dict) | 375 try_jobs.append(try_job_dict) |
| 358 | 376 |
| 359 _UpdateTryJobInfoBasedOnSwarming(tasks_info[step_name], try_jobs) | 377 if show_debug_info: |
| | 378 # Include any forced try jobs triggered manually in debug mode. |
| 379 try_job_keys.add(step_failure_result_map) |
| 380 |
| 381 _UpdateTryJobInfoBasedOnSwarming(tasks_info[step_name], try_jobs, |
| 382 show_debug_info, |
| 383 step_name) |
| 360 | 384 |
| 361 for try_job_key in try_job_keys: | 385 for try_job_key in try_job_keys: |
| 362 _GetCulpritInfoForTryJobResultForTest(try_job_key, culprits_info) | 386 _GetCulpritInfoForTryJobResultForTest(try_job_key, culprits_info, |
| 363 | 387 show_debug_info) |
| 364 return culprits_info | 388 return culprits_info |
| 365 | 389 |
| 366 | 390 |
| 367 def _GetTryJobResultForCompile(failure_result_map): | 391 def _GetTryJobResultForCompile(failure_result_map): |
| 368 try_job_key = failure_result_map['compile'] | 392 try_job_key = failure_result_map['compile'] |
| 369 referred_build_keys = try_job_key.split('/') | 393 referred_build_keys = try_job_key.split('/') |
| 370 culprit_info = defaultdict(lambda: defaultdict(list)) | 394 culprit_info = defaultdict(lambda: defaultdict(list)) |
| 371 | 395 |
| 372 try_job = WfTryJob.Get(*referred_build_keys) | 396 try_job = WfTryJob.Get(*referred_build_keys) |
| 373 if not try_job or try_job.test_results: | 397 if not try_job or try_job.test_results: |
| (...skipping 12 matching lines...) Expand all Loading... |
| 386 compile_try_job['try_job_url'] = try_job_result['url'] | 410 compile_try_job['try_job_url'] = try_job_result['url'] |
| 387 compile_try_job['try_job_build_number'] = ( | 411 compile_try_job['try_job_build_number'] = ( |
| 388 _GetTryJobBuildNumber(try_job_result['url'])) | 412 _GetTryJobBuildNumber(try_job_result['url'])) |
| 389 if try_job_result.get('culprit', {}).get('compile'): | 413 if try_job_result.get('culprit', {}).get('compile'): |
| 390 compile_try_job['culprit'] = try_job_result['culprit']['compile'] | 414 compile_try_job['culprit'] = try_job_result['culprit']['compile'] |
| 391 | 415 |
| 392 culprit_info['compile']['try_jobs'].append(compile_try_job) | 416 culprit_info['compile']['try_jobs'].append(compile_try_job) |
| 393 return culprit_info | 417 return culprit_info |
| 394 | 418 |
| 395 | 419 |
| 396 def GetAllTryJobResults(master_name, builder_name, build_number): | 420 def GetAllTryJobResults(master_name, builder_name, build_number, |
| 421 show_debug_info=False): |
| 397 culprits_info = {} | 422 culprits_info = {} |
| 398 is_test_failure = True | 423 is_test_failure = True |
| 399 | 424 |
| 400 analysis_result, failure_result_map = _GetFailureResultMap( | 425 analysis_result, failure_result_map = _GetFailureResultMap( |
| 401 master_name, builder_name, build_number) | 426 master_name, builder_name, build_number) |
| 402 | 427 |
| 403 if failure_result_map: | 428 if failure_result_map: |
| 404 for step_name in failure_result_map: | 429 for step_name in failure_result_map: |
| 405 if step_name.lower() == 'compile': | 430 if step_name.lower() == 'compile': |
| 406 is_test_failure = False | 431 is_test_failure = False |
| 407 break | 432 break |
| 408 if is_test_failure: | 433 if is_test_failure: |
| 409 tasks_info = _GenerateSwarmingTasksData(failure_result_map) | 434 tasks_info = _GenerateSwarmingTasksData(failure_result_map) |
| 410 culprits_info = _GetAllTryJobResultsForTest( | 435 culprits_info = _GetAllTryJobResultsForTest( |
| 411 failure_result_map, tasks_info) | 436 failure_result_map, tasks_info, show_debug_info) |
| 412 else: | 437 else: |
| 413 culprits_info = _GetTryJobResultForCompile(failure_result_map) | 438 culprits_info = _GetTryJobResultForCompile(failure_result_map) |
| 414 elif analysis_result: | 439 elif analysis_result: |
| 415 for failure in analysis_result['failures']: | 440 for failure in analysis_result['failures']: |
| 416 step_name = failure['step_name'] | 441 step_name = failure['step_name'] |
| 417 tests = [] | 442 tests = [] |
| 418 for test in failure.get('tests', []): | 443 for test in failure.get('tests', []): |
| 419 tests.append(test['test_name']) | 444 tests.append(test['test_name']) |
| 420 | 445 |
| 421 culprits_info[step_name] = { | 446 culprits_info[step_name] = { |
| 422 'try_jobs': [ | 447 'try_jobs': [ |
| 423 { | 448 { |
| 424 'status': result_status.NO_FAILURE_RESULT_MAP, | 449 'status': result_status.NO_FAILURE_RESULT_MAP, |
| 425 'tests': tests | 450 'tests': tests |
| 426 } | 451 } |
| 427 ] | 452 ] |
| 428 } | 453 } |
| 454 |
| 429 return culprits_info | 455 return culprits_info |
| OLD | NEW |