| OLD | NEW |
| 1 # Copyright 2016 The Chromium Authors. All rights reserved. | 1 # Copyright 2016 The Chromium Authors. All rights reserved. |
| 2 # Use of this source code is governed by a BSD-style license that can be | 2 # Use of this source code is governed by a BSD-style license that can be |
| 3 # found in the LICENSE file. | 3 # found in the LICENSE file. |
| 4 | 4 |
| 5 from collections import defaultdict | 5 from collections import defaultdict |
| 6 import copy | 6 import copy |
| 7 | 7 |
| 8 from handlers import result_status | 8 from handlers import result_status |
| 9 from model import analysis_status | 9 from model import analysis_status |
| 10 from model.wf_analysis import WfAnalysis | 10 from model.wf_analysis import WfAnalysis |
| (...skipping 174 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... |
| 185 revision = culprit['revision'] | 185 revision = culprit['revision'] |
| 186 if organized_culprits.get(revision): | 186 if organized_culprits.get(revision): |
| 187 organized_culprits[revision]['failed_tests'].append(test_name) | 187 organized_culprits[revision]['failed_tests'].append(test_name) |
| 188 else: | 188 else: |
| 189 organized_culprits[revision] = culprit | 189 organized_culprits[revision] = culprit |
| 190 organized_culprits[revision]['failed_tests'] = [test_name] | 190 organized_culprits[revision]['failed_tests'] = [test_name] |
| 191 | 191 |
| 192 return organized_culprits | 192 return organized_culprits |
| 193 | 193 |
| 194 | 194 |
| 195 def _GetCulpritInfoForTryJobResultForTest(try_job_key, culprits_info, | 195 def _GetCulpritInfoForTryJobResultForTest(try_job_key, culprits_info): |
| 196 show_debug_info): | |
| 197 referred_build_keys = try_job_key.split('/') | 196 referred_build_keys = try_job_key.split('/') |
| 198 try_job = WfTryJob.Get(*referred_build_keys) | 197 try_job = WfTryJob.Get(*referred_build_keys) |
| 199 | 198 |
| 200 if not try_job or try_job.compile_results: | 199 if not try_job or try_job.compile_results: |
| 201 return | 200 return |
| 202 | 201 |
| 203 try_job_result = try_job.test_results[-1] if try_job.test_results else None | 202 try_job_result = try_job.test_results[-1] if try_job.test_results else None |
| 204 | 203 |
| 205 for step_try_jobs in culprits_info.values(): | 204 for step_try_jobs in culprits_info.values(): |
| 206 # If try job found different culprits for each test, split tests by culprit. | 205 # If try job found different culprits for each test, split tests by culprit. |
| 207 additional_tests_culprit_info = [] | 206 additional_tests_culprit_info = [] |
| 208 | 207 |
| 209 for try_job_info in step_try_jobs['try_jobs']: | 208 for try_job_info in step_try_jobs['try_jobs']: |
| 210 if (try_job_key != try_job_info['try_job_key'] | 209 if (try_job_key != try_job_info['try_job_key'] |
| 211 or (try_job_info.get('status') and not show_debug_info)): | 210 or try_job_info.get('status')): |
| 212 # Conditions that try_job_info has status are: | 211 # Conditions that try_job_info has status are: |
| 213 # If there is no swarming task, there won't be try job; | 212 # If there is no swarming task, there won't be try job; |
| 214 # If the swarming task is not completed yet, there won't be try job yet; | 213 # If the swarming task is not completed yet, there won't be try job yet; |
| 215 # If there are flaky tests found, those tests will be marked as flaky, | 214 # If there are flaky tests found, those tests will be marked as flaky, |
| 216 # and no try job for them will be triggered. | 215 # and no try job for them will be triggered. |
| 217 # If a try job was force triggered by an admin. | |
| 218 continue | 216 continue |
| 219 | 217 |
| 220 try_job_info['status'] = try_job.status | 218 try_job_info['status'] = try_job.status |
| 221 if try_job_result: | 219 if try_job_result: |
| 222 # Needs to use ref_name to match step_name in try job. | 220 # Needs to use ref_name to match step_name in try job. |
| 223 ref_name = try_job_info['ref_name'] | 221 ref_name = try_job_info['ref_name'] |
| 224 # Saves try job information. | 222 # Saves try job information. |
| 225 if try_job_result.get('url'): # pragma: no cover | 223 if try_job_result.get('url'): # pragma: no cover |
| 226 try_job_info['try_job_url'] = try_job_result['url'] | 224 try_job_info['try_job_url'] = try_job_result['url'] |
| 227 try_job_info['try_job_build_number'] = ( | 225 try_job_info['try_job_build_number'] = ( |
| (...skipping 72 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... |
| 300 'try_job_key': 'm/b/121' | 298 'try_job_key': 'm/b/121' |
| 301 }, | 299 }, |
| 302 ... | 300 ... |
| 303 ] | 301 ] |
| 304 """ | 302 """ |
| 305 additional_flakiness_list = [] | 303 additional_flakiness_list = [] |
| 306 for try_job in try_jobs: | 304 for try_job in try_jobs: |
| 307 try_job_key = try_job['try_job_key'] | 305 try_job_key = try_job['try_job_key'] |
| 308 task = step_tasks_info.get('swarming_tasks', {}).get(try_job_key) | 306 task = step_tasks_info.get('swarming_tasks', {}).get(try_job_key) |
| 309 | 307 |
| 310 if (task['task_info']['status'] != analysis_status.COMPLETED and | 308 if task['task_info'].get('task_id'): |
| 311 not show_debug_info): | 309 try_job['task_id'] = task['task_info']['task_id'] |
| 310 try_job['task_url'] = task['task_info']['task_url'] |
| 311 |
| 312 if (task['task_info']['status'] != analysis_status.COMPLETED): |
| 312 # There is something wrong with swarming task or it's not done yet, | 313 # There is something wrong with swarming task or it's not done yet, |
| 313 # no try job yet or ever. | 314 # no try job yet or ever. |
| 314 try_job['status'] = result_status.NO_TRY_JOB_REASON_MAP[ | 315 try_job['status'] = result_status.NO_TRY_JOB_REASON_MAP[ |
| 315 task['task_info']['status']] | 316 task['task_info']['status']] |
| 316 try_job['tests'] = task.get('all_tests', []) | 317 try_job['tests'] = task.get('all_tests', []) |
| 318 |
| 319 if not show_debug_info: |
| 320 continue |
| 321 else: |
| 322 # TODO(lijeffrey): This is a hack to prevent unclassified failures |
| 323 # from showing up as reliable in debug view. As part of the refactoring |
| 324 # work categorizing failures and adding the ability to force trigger try |
| 325 # jobs independently should be considered. |
| 326 try_job['can_force'] = True |
| 327 |
| 328 # Swarming task is completed or a manual try job rerun was triggered. |
| 329 # Group tests according to task result. |
| 330 if task.get('ref_name'): |
| 331 try_job['ref_name'] = task['ref_name'] |
| 317 else: | 332 else: |
| 318 # Swarming task is completed or a manual try job rerun was triggered. | 333 # A try job was forced for a non-swarming step. |
| 319 # Group tests according to task result. | 334 if show_debug_info: # pragma: no branch |
| 320 if task.get('ref_name'): | 335 try_job['ref_name'] = step_name.split()[0] |
| 321 try_job['ref_name'] = task['ref_name'] | |
| 322 else: | |
| 323 # A try job was forced for a non-swarming step. | |
| 324 if show_debug_info: # pragma: no branch | |
| 325 try_job['ref_name'] = step_name.split()[0] | |
| 326 | 336 |
| 327 if task.get('reliable_tests'): | 337 if task.get('reliable_tests'): |
| 328 try_job['tests'] = task['reliable_tests'] | 338 try_job['tests'] = task['reliable_tests'] |
| 329 if task.get('flaky_tests'): | 339 if task.get('flaky_tests'): |
| 330 # Split this try job into two groups: flaky group and reliable group. | 340 # Split this try job into two groups: flaky group and reliable group. |
| 331 flaky_try_job = copy.deepcopy(try_job) | 341 flaky_try_job = copy.deepcopy(try_job) |
| 332 flaky_try_job['status'] = result_status.FLAKY | 342 flaky_try_job['status'] = result_status.FLAKY |
| 333 flaky_try_job['tests'] = task['flaky_tests'] | 343 flaky_try_job['tests'] = task['flaky_tests'] |
| 334 flaky_try_job['task_id'] = task['task_info']['task_id'] | 344 flaky_try_job['task_id'] = task['task_info']['task_id'] |
| 335 flaky_try_job['task_url'] = task['task_info']['task_url'] | 345 flaky_try_job['task_url'] = task['task_info']['task_url'] |
| 336 additional_flakiness_list.append(flaky_try_job) | 346 additional_flakiness_list.append(flaky_try_job) |
| 337 elif task.get('flaky_tests'): # pragma: no cover | 347 elif task.get('flaky_tests'): # pragma: no cover |
| 338 # All Flaky. | 348 # All Flaky. |
| 339 try_job['status'] = result_status.FLAKY | 349 try_job['status'] = result_status.FLAKY |
| 340 try_job['tests'] = task['flaky_tests'] | 350 try_job['tests'] = task['flaky_tests'] |
| 341 | |
| 342 if task['task_info'].get('task_id'): | |
| 343 try_job['task_id'] = task['task_info']['task_id'] | |
| 344 try_job['task_url'] = task['task_info']['task_url'] | |
| 345 | 351 |
| 346 try_jobs.extend(additional_flakiness_list) | 352 try_jobs.extend(additional_flakiness_list) |
| 347 | 353 |
| 348 | 354 |
| 349 def _GetAllTryJobResultsForTest(failure_result_map, tasks_info, | 355 def _GetAllTryJobResultsForTest(failure_result_map, tasks_info, |
| 350 show_debug_info=False): | 356 show_debug_info=False): |
| 351 culprits_info = defaultdict(lambda: defaultdict(list)) | 357 culprits_info = defaultdict(lambda: defaultdict(list)) |
| 358 |
| 352 if not tasks_info: | 359 if not tasks_info: |
| 353 return culprits_info | 360 return culprits_info |
| 354 | 361 |
| 355 try_job_keys = set() | 362 try_job_keys = set() |
| 356 | 363 |
| 357 for step_name, step_failure_result_map in failure_result_map.iteritems(): | 364 for step_name, step_failure_result_map in failure_result_map.iteritems(): |
| 358 try_jobs = culprits_info[step_name]['try_jobs'] | 365 try_jobs = culprits_info[step_name]['try_jobs'] |
| 359 if isinstance(step_failure_result_map, dict): | 366 if isinstance(step_failure_result_map, dict): |
| 360 step_try_job_keys = set() | 367 step_try_job_keys = set() |
| 361 for try_job_key in step_failure_result_map.values(): | 368 for try_job_key in step_failure_result_map.values(): |
| (...skipping 14 matching lines...) Expand all Loading... |
| 376 | 383 |
| 377 if show_debug_info: | 384 if show_debug_info: |
| 378 # Include any forced try jobs triggered manually in debug mode. | 385 # Include any forced try jobs triggered manually in debug mode. |
| 379 try_job_keys.add(step_failure_result_map) | 386 try_job_keys.add(step_failure_result_map) |
| 380 | 387 |
| 381 _UpdateTryJobInfoBasedOnSwarming(tasks_info[step_name], try_jobs, | 388 _UpdateTryJobInfoBasedOnSwarming(tasks_info[step_name], try_jobs, |
| 382 show_debug_info, | 389 show_debug_info, |
| 383 step_name) | 390 step_name) |
| 384 | 391 |
| 385 for try_job_key in try_job_keys: | 392 for try_job_key in try_job_keys: |
| 386 _GetCulpritInfoForTryJobResultForTest(try_job_key, culprits_info, | 393 _GetCulpritInfoForTryJobResultForTest(try_job_key, culprits_info) |
| 387 show_debug_info) | 394 |
| 388 return culprits_info | 395 return culprits_info |
| 389 | 396 |
| 390 | 397 |
| 391 def _GetTryJobResultForCompile(failure_result_map): | 398 def _GetTryJobResultForCompile(failure_result_map): |
| 392 try_job_key = failure_result_map['compile'] | 399 try_job_key = failure_result_map['compile'] |
| 393 referred_build_keys = try_job_key.split('/') | 400 referred_build_keys = try_job_key.split('/') |
| 394 culprit_info = defaultdict(lambda: defaultdict(list)) | 401 culprit_info = defaultdict(lambda: defaultdict(list)) |
| 395 | 402 |
| 396 try_job = WfTryJob.Get(*referred_build_keys) | 403 try_job = WfTryJob.Get(*referred_build_keys) |
| 397 if not try_job or try_job.test_results: | 404 if not try_job or try_job.test_results: |
| (...skipping 48 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... |
| 446 culprits_info[step_name] = { | 453 culprits_info[step_name] = { |
| 447 'try_jobs': [ | 454 'try_jobs': [ |
| 448 { | 455 { |
| 449 'status': result_status.NO_FAILURE_RESULT_MAP, | 456 'status': result_status.NO_FAILURE_RESULT_MAP, |
| 450 'tests': tests | 457 'tests': tests |
| 451 } | 458 } |
| 452 ] | 459 ] |
| 453 } | 460 } |
| 454 | 461 |
| 455 return culprits_info | 462 return culprits_info |
| OLD | NEW |