Chromium Code Reviews| OLD | NEW |
|---|---|
| 1 # Copyright 2016 The Chromium Authors. All rights reserved. | 1 # Copyright 2016 The Chromium Authors. All rights reserved. |
| 2 # Use of this source code is governed by a BSD-style license that can be | 2 # Use of this source code is governed by a BSD-style license that can be |
| 3 # found in the LICENSE file. | 3 # found in the LICENSE file. |
| 4 | 4 |
| 5 from collections import defaultdict | 5 from collections import defaultdict |
| 6 import copy | 6 import copy |
| 7 | 7 |
| 8 from handlers import result_status | 8 from handlers import result_status |
| 9 from model import analysis_status | 9 from model import analysis_status |
| 10 from model.wf_analysis import WfAnalysis | 10 from model.wf_analysis import WfAnalysis |
| (...skipping 174 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... | |
| 185 revision = culprit['revision'] | 185 revision = culprit['revision'] |
| 186 if organized_culprits.get(revision): | 186 if organized_culprits.get(revision): |
| 187 organized_culprits[revision]['failed_tests'].append(test_name) | 187 organized_culprits[revision]['failed_tests'].append(test_name) |
| 188 else: | 188 else: |
| 189 organized_culprits[revision] = culprit | 189 organized_culprits[revision] = culprit |
| 190 organized_culprits[revision]['failed_tests'] = [test_name] | 190 organized_culprits[revision]['failed_tests'] = [test_name] |
| 191 | 191 |
| 192 return organized_culprits | 192 return organized_culprits |
| 193 | 193 |
| 194 | 194 |
| 195 def _GetCulpritInfoForTryJobResultForTest(try_job_key, culprits_info): | 195 def _GetCulpritInfoForTryJobResultForTest(try_job_key, culprits_info, |
| 196 show_debug_info): | |
| 196 referred_build_keys = try_job_key.split('/') | 197 referred_build_keys = try_job_key.split('/') |
| 197 try_job = WfTryJob.Get(*referred_build_keys) | 198 try_job = WfTryJob.Get(*referred_build_keys) |
| 199 | |
| 198 if not try_job or try_job.compile_results: | 200 if not try_job or try_job.compile_results: |
| 199 return | 201 return |
| 200 | 202 |
| 201 try_job_result = try_job.test_results[-1] if try_job.test_results else None | 203 try_job_result = try_job.test_results[-1] if try_job.test_results else None |
| 202 | 204 |
| 203 for step_try_jobs in culprits_info.values(): | 205 for step_try_jobs in culprits_info.values(): |
| 204 # If try job found different culprits for each test, split tests by culprit. | 206 # If try job found different culprits for each test, split tests by culprit. |
| 205 additional_tests_culprit_info = [] | 207 additional_tests_culprit_info = [] |
| 208 | |
| 206 for try_job_info in step_try_jobs['try_jobs']: | 209 for try_job_info in step_try_jobs['try_jobs']: |
| 207 if (try_job_key != try_job_info['try_job_key'] | 210 if (try_job_key != try_job_info['try_job_key'] |
| 208 or try_job_info.get('status')): | 211 or (try_job_info.get('status') and not show_debug_info)): |
| 209 # Conditions that try_job_info has status are: | 212 # Conditions that try_job_info has status are: |
| 210 # If there is no swarming task, there won't be try job; | 213 # If there is no swarming task, there won't be try job; |
| 211 # If the swarming task is not completed yet, there won't be try job yet; | 214 # If the swarming task is not completed yet, there won't be try job yet; |
| 212 # If there are flaky tests found, those tests will be marked as flaky, | 215 # If there are flaky tests found, those tests will be marked as flaky, |
| 213 # and no try job for them will be triggered. | 216 # and no try job for them will be triggered. |
| 214 continue | 217 continue |
| 215 | 218 |
| 216 try_job_info['status'] = try_job.status | 219 try_job_info['status'] = try_job.status |
| 217 if try_job_result: | 220 if try_job_result: |
| 218 # Needs to use ref_name to match step_name in try job. | 221 # Needs to use ref_name to match step_name in try job. |
| (...skipping 53 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... | |
| 272 'review_url': iterate_culprit.get( | 275 'review_url': iterate_culprit.get( |
| 273 'url', iterate_culprit.get('review_url', None)) | 276 'url', iterate_culprit.get('review_url', None)) |
| 274 } | 277 } |
| 275 tmp_try_job_info['tests'] = iterate_culprit['failed_tests'] | 278 tmp_try_job_info['tests'] = iterate_culprit['failed_tests'] |
| 276 additional_tests_culprit_info.append(tmp_try_job_info) | 279 additional_tests_culprit_info.append(tmp_try_job_info) |
| 277 | 280 |
| 278 if additional_tests_culprit_info: | 281 if additional_tests_culprit_info: |
| 279 step_try_jobs['try_jobs'].extend(additional_tests_culprit_info) | 282 step_try_jobs['try_jobs'].extend(additional_tests_culprit_info) |
| 280 | 283 |
| 281 | 284 |
| 282 def _UpdateTryJobInfoBasedOnSwarming(step_tasks_info, try_jobs): | 285 def _UpdateTryJobInfoBasedOnSwarming(step_tasks_info, try_jobs, |
| 286 show_debug_info=False): | |
| 283 """ | 287 """ |
| 284 Args: | 288 Args: |
| 285 step_tasks_info (dict): A dict of swarming task info for this step. | 289 step_tasks_info (dict): A dict of swarming task info for this step. |
| 286 It is the result from _GenerateSwarmingTasksData. | 290 It is the result from _GenerateSwarmingTasksData. |
| 287 try_jobs (list): A list to save try job data for the step, format as below: | 291 try_jobs (list): A list to save try job data for the step, format as below: |
| 288 [ | 292 [ |
| 289 { | 293 { |
| 290 'try_job_key': 'm/b/120' | 294 'try_job_key': 'm/b/120' |
| 291 }, | 295 }, |
| 292 { | 296 { |
| 293 'try_job_key': 'm/b/121' | 297 'try_job_key': 'm/b/121' |
| 294 }, | 298 }, |
| 295 ... | 299 ... |
| 296 ] | 300 ] |
| 297 """ | 301 """ |
| 298 additional_flakiness_list = [] | 302 additional_flakiness_list = [] |
| 299 for try_job in try_jobs: | 303 for try_job in try_jobs: |
| 300 try_job_key = try_job['try_job_key'] | 304 try_job_key = try_job['try_job_key'] |
| 301 task = step_tasks_info.get('swarming_tasks', {}).get(try_job_key) | 305 task = step_tasks_info.get('swarming_tasks', {}).get(try_job_key) |
| 302 | 306 |
| 303 if task['task_info']['status'] != analysis_status.COMPLETED: | 307 if (task['task_info']['status'] != analysis_status.COMPLETED and |
| 308 not show_debug_info): | |
| 304 # There is something wrong with swarming task or it's not done yet, | 309 # There is something wrong with swarming task or it's not done yet, |
| 305 # no try job yet or ever. | 310 # no try job yet or ever. |
| 306 try_job['status'] = result_status.NO_TRY_JOB_REASON_MAP[ | 311 try_job['status'] = result_status.NO_TRY_JOB_REASON_MAP[ |
| 307 task['task_info']['status']] | 312 task['task_info']['status']] |
| 308 try_job['tests'] = task.get('all_tests', []) | 313 try_job['tests'] = task.get('all_tests', []) |
| 309 else: | 314 else: |
| 310 # Swarming task is completed, group tests according to task result. | 315 # Swarming task is completed or a manual try job rerun was triggered. |
| 311 try_job['ref_name'] = task['ref_name'] | 316 # Group tests according to task result. |
| 317 try_job['ref_name'] = task.get('ref_name') | |
|
chanli
2016/06/02 23:32:52
If this is a force-try-job for a non-swarming task
lijeffrey
2016/06/27 22:15:43
Done.
| |
| 312 if task.get('reliable_tests'): | 318 if task.get('reliable_tests'): |
| 313 try_job['tests'] = task['reliable_tests'] | 319 try_job['tests'] = task['reliable_tests'] |
| 314 if task.get('flaky_tests'): | 320 if task.get('flaky_tests'): |
| 315 # Split this try job into two groups: flaky group and reliable group. | 321 # Split this try job into two groups: flaky group and reliable group. |
| 316 flaky_try_job = copy.deepcopy(try_job) | 322 flaky_try_job = copy.deepcopy(try_job) |
| 317 flaky_try_job['status'] = result_status.FLAKY | 323 flaky_try_job['status'] = result_status.FLAKY |
| 318 flaky_try_job['tests'] = task['flaky_tests'] | 324 flaky_try_job['tests'] = task['flaky_tests'] |
| 319 flaky_try_job['task_id'] = task['task_info']['task_id'] | 325 flaky_try_job['task_id'] = task['task_info']['task_id'] |
| 320 flaky_try_job['task_url'] = task['task_info']['task_url'] | 326 flaky_try_job['task_url'] = task['task_info']['task_url'] |
| 321 additional_flakiness_list.append(flaky_try_job) | 327 additional_flakiness_list.append(flaky_try_job) |
| 322 elif task.get('flaky_tests'): # pragma: no cover | 328 elif task.get('flaky_tests'): # pragma: no cover |
| 323 # All Flaky. | 329 # All Flaky. |
| 324 try_job['status'] = result_status.FLAKY | 330 try_job['status'] = result_status.FLAKY |
| 325 try_job['tests'] = task['flaky_tests'] | 331 try_job['tests'] = task['flaky_tests'] |
| 326 | 332 |
| 327 if task['task_info'].get('task_id'): | 333 if task['task_info'].get('task_id'): |
| 328 try_job['task_id'] = task['task_info']['task_id'] | 334 try_job['task_id'] = task['task_info']['task_id'] |
| 329 try_job['task_url'] = task['task_info']['task_url'] | 335 try_job['task_url'] = task['task_info']['task_url'] |
| 330 | 336 |
| 331 try_jobs.extend(additional_flakiness_list) | 337 try_jobs.extend(additional_flakiness_list) |
| 332 | 338 |
| 333 | 339 |
| 334 def _GetAllTryJobResultsForTest(failure_result_map, tasks_info): | 340 def _GetAllTryJobResultsForTest(failure_result_map, tasks_info, |
| 341 show_debug_info=False): | |
| 335 culprits_info = defaultdict(lambda: defaultdict(list)) | 342 culprits_info = defaultdict(lambda: defaultdict(list)) |
| 336 if not tasks_info: | 343 if not tasks_info: |
| 337 return culprits_info | 344 return culprits_info |
| 338 | 345 |
| 339 try_job_keys = set() | 346 try_job_keys = set() |
| 340 for step_name, step_failure_result_map in failure_result_map.iteritems(): | 347 for step_name, step_failure_result_map in failure_result_map.iteritems(): |
| 341 try_jobs = culprits_info[step_name]['try_jobs'] | 348 try_jobs = culprits_info[step_name]['try_jobs'] |
| 342 | 349 |
| 343 if isinstance(step_failure_result_map, dict): | 350 if isinstance(step_failure_result_map, dict): |
| 344 step_try_job_keys = set() | 351 step_try_job_keys = set() |
| 345 for try_job_key in step_failure_result_map.values(): | 352 for try_job_key in step_failure_result_map.values(): |
| 346 if try_job_key not in step_try_job_keys: | 353 if try_job_key not in step_try_job_keys: |
| 347 try_job_dict = { | 354 try_job_dict = { |
| 348 'try_job_key': try_job_key | 355 'try_job_key': try_job_key |
| 349 } | 356 } |
| 350 try_jobs.append(try_job_dict) | 357 try_jobs.append(try_job_dict) |
| 351 step_try_job_keys.add(try_job_key) | 358 step_try_job_keys.add(try_job_key) |
| 352 try_job_keys.update(step_try_job_keys) | 359 try_job_keys.update(step_try_job_keys) |
| 353 else: | 360 else: |
| 354 # Try job should only be triggered for swarming tests, because we cannot | 361 # Try job should only be triggered for swarming tests, because we cannot |
| 355 # identify flaky tests for non-swarming tests. | 362 # identify flaky tests for non-swarming tests. |
| 356 try_job_dict = { | 363 try_job_dict = { |
| 357 'try_job_key': step_failure_result_map | 364 'try_job_key': step_failure_result_map |
| 358 } | 365 } |
| 359 try_jobs.append(try_job_dict) | 366 try_jobs.append(try_job_dict) |
| 360 | 367 |
| 361 _UpdateTryJobInfoBasedOnSwarming(tasks_info[step_name], try_jobs) | 368 if show_debug_info: |
| 369 # Include any forced try jobs triggered manually in debug mode. | |
| 370 try_job_keys.add(step_failure_result_map) | |
| 371 | |
| 372 _UpdateTryJobInfoBasedOnSwarming(tasks_info[step_name], try_jobs, | |
| 373 show_debug_info) | |
| 362 | 374 |
| 363 for try_job_key in try_job_keys: | 375 for try_job_key in try_job_keys: |
| 364 _GetCulpritInfoForTryJobResultForTest(try_job_key, culprits_info) | 376 _GetCulpritInfoForTryJobResultForTest(try_job_key, culprits_info, |
| 377 show_debug_info) | |
| 365 | 378 |
| 366 return culprits_info | 379 return culprits_info |
| 367 | 380 |
| 368 | 381 |
| 369 def _GetTryJobResultForCompile(failure_result_map): | 382 def _GetTryJobResultForCompile(failure_result_map): |
| 370 try_job_key = failure_result_map['compile'] | 383 try_job_key = failure_result_map['compile'] |
| 371 referred_build_keys = try_job_key.split('/') | 384 referred_build_keys = try_job_key.split('/') |
| 372 culprit_info = defaultdict(lambda: defaultdict(list)) | 385 culprit_info = defaultdict(lambda: defaultdict(list)) |
| 373 | 386 |
| 374 try_job = WfTryJob.Get(*referred_build_keys) | 387 try_job = WfTryJob.Get(*referred_build_keys) |
| (...skipping 13 matching lines...) Expand all Loading... | |
| 388 compile_try_job['try_job_url'] = try_job_result['url'] | 401 compile_try_job['try_job_url'] = try_job_result['url'] |
| 389 compile_try_job['try_job_build_number'] = ( | 402 compile_try_job['try_job_build_number'] = ( |
| 390 _GetTryJobBuildNumber(try_job_result['url'])) | 403 _GetTryJobBuildNumber(try_job_result['url'])) |
| 391 if try_job_result.get('culprit', {}).get('compile'): | 404 if try_job_result.get('culprit', {}).get('compile'): |
| 392 compile_try_job['culprit'] = try_job_result['culprit']['compile'] | 405 compile_try_job['culprit'] = try_job_result['culprit']['compile'] |
| 393 | 406 |
| 394 culprit_info['compile']['try_jobs'].append(compile_try_job) | 407 culprit_info['compile']['try_jobs'].append(compile_try_job) |
| 395 return culprit_info | 408 return culprit_info |
| 396 | 409 |
| 397 | 410 |
| 398 def GetAllTryJobResults(master_name, builder_name, build_number): | 411 def GetAllTryJobResults(master_name, builder_name, build_number, |
| 412 show_debug_info=False): | |
| 399 culprits_info = {} | 413 culprits_info = {} |
| 400 is_test_failure = True | 414 is_test_failure = True |
| 401 | 415 |
| 402 analysis_result, failure_result_map = _GetFailureResultMap( | 416 analysis_result, failure_result_map = _GetFailureResultMap( |
| 403 master_name, builder_name, build_number) | 417 master_name, builder_name, build_number) |
| 404 | 418 |
| 405 if failure_result_map: | 419 if failure_result_map: |
| 406 for step_name in failure_result_map: | 420 for step_name in failure_result_map: |
| 407 if step_name.lower() == 'compile': | 421 if step_name.lower() == 'compile': |
| 408 is_test_failure = False | 422 is_test_failure = False |
| 409 break | 423 break |
| 410 if is_test_failure: | 424 if is_test_failure: |
| 411 tasks_info = _GenerateSwarmingTasksData(failure_result_map) | 425 tasks_info = _GenerateSwarmingTasksData(failure_result_map) |
| 412 culprits_info = _GetAllTryJobResultsForTest( | 426 culprits_info = _GetAllTryJobResultsForTest( |
| 413 failure_result_map, tasks_info) | 427 failure_result_map, tasks_info, show_debug_info) |
| 414 else: | 428 else: |
| 415 culprits_info = _GetTryJobResultForCompile(failure_result_map) | 429 culprits_info = _GetTryJobResultForCompile(failure_result_map) |
| 416 elif analysis_result: | 430 elif analysis_result: |
| 417 for failure in analysis_result['failures']: | 431 for failure in analysis_result['failures']: |
| 418 step_name = failure['step_name'] | 432 step_name = failure['step_name'] |
| 419 tests = [] | 433 tests = [] |
| 420 for test in failure.get('tests', []): | 434 for test in failure.get('tests', []): |
| 421 tests.append(test['test_name']) | 435 tests.append(test['test_name']) |
| 422 | 436 |
| 423 culprits_info[step_name] = { | 437 culprits_info[step_name] = { |
| 424 'try_jobs': [ | 438 'try_jobs': [ |
| 425 { | 439 { |
| 426 'status': result_status.NO_FAILURE_RESULT_MAP, | 440 'status': result_status.NO_FAILURE_RESULT_MAP, |
| 427 'tests': tests | 441 'tests': tests |
| 428 } | 442 } |
| 429 ] | 443 ] |
| 430 } | 444 } |
| 431 return culprits_info | 445 return culprits_info |
| OLD | NEW |