| Index: appengine/findit/handlers/handlers_util.py
|
| diff --git a/appengine/findit/handlers/handlers_util.py b/appengine/findit/handlers/handlers_util.py
|
| index 105c24ba6fb5f654f347f9682eaca4fb166c0249..fe3b67e872e411b739a8285f5c51c0a2a9954728 100644
|
| --- a/appengine/findit/handlers/handlers_util.py
|
| +++ b/appengine/findit/handlers/handlers_util.py
|
| @@ -5,6 +5,7 @@
|
| from collections import defaultdict
|
| import copy
|
|
|
| +from handlers import result_status
|
| from model import wf_analysis_status
|
| from model.wf_analysis import WfAnalysis
|
| from model.wf_swarming_task import WfSwarmingTask
|
| @@ -13,101 +14,132 @@ from waterfall import buildbot
|
| from waterfall import waterfall_config
|
|
|
|
|
| -FLAKY = 'Flaky'
|
| +def _GetFailureResultMap(master_name, builder_name, build_number):
|
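| +  """Returns the result and failure_result_map of the build's WfAnalysis.
|
| +
|
| +  Returns (None, None) if there is no analysis for the build.
|
| +  """
|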
| + analysis = WfAnalysis.Get(master_name, builder_name, build_number)
|
| + if not analysis:
|
| + return None, None
|
| +
|
| + return analysis.result, analysis.failure_result_map
|
| +
|
|
|
| +def _GetAllTestsForASwarmingTask(task_key, step_failure_result_map):
|
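| +  """Returns all tests in the step that map to the given swarming task key.
|
| +
|
| +  For example (hypothetical values), with
|
| +  step_failure_result_map = {'test1': 'm/b/120', 'test2': 'm/b/121'} and
|
| +  task_key = 'm/b/121', this returns ['test2'].
|
| +  """
|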
| + all_tests = set()
|
| + for test_name, test_task_key in step_failure_result_map.iteritems():
|
| + if task_key == test_task_key:
|
| + all_tests.add(test_name)
|
| + return list(all_tests)
|
|
|
| -def GenerateSwarmingTasksData(master_name, builder_name, build_number):
|
| +
|
| +def _GenerateSwarmingTasksData(failure_result_map):
|
| """Collects info for all related swarming tasks.
|
|
|
| Returns: A dict as below:
|
| {
|
| 'step1': {
|
| - 'swarming_tasks': [
|
| - {
|
| - 'status': 'Completed',
|
| - 'task_id': 'task1',
|
| - 'task_url': (
|
| - 'https://chromium-swarm.appspot.com/user/task/task1'),
|
| - 'tests': ['test2']
|
| - },
|
| - {
|
| - 'status': 'Completed',
|
| - 'task_id': 'task0',
|
| - 'task_url': (
|
| - 'https://chromium-swarm.appspot.com/user/task/task0'),
|
| - 'tests': ['test1']
|
| - }
|
| - ],
|
| - 'tests': {
|
| - 'test1': {
|
| - 'status': 'Completed',
|
| - 'task_id': 'task0',
|
| - 'task_url': (
|
| - 'https://chromium-swarm.appspot.com/user/task/task0')
|
| - },
|
| - 'test2': {
|
| - 'status': 'Completed',
|
| - 'task_id': 'task1',
|
| - 'task_url': (
|
| - 'https://chromium-swarm.appspot.com/user/task/task1')
|
| + 'swarming_tasks': {
|
| + 'm/b/121': {
|
| + 'task_info': {
|
| + 'status': 'Completed',
|
| + 'task_id': 'task1',
|
| + 'task_url': ('https://chromium-swarm.appspot.com/user'
|
| + '/task/task1')
|
| + },
|
| + 'all_tests': ['test2', 'test3', 'test4'],
|
| + 'reliable_tests': ['test2'],
|
| + 'flaky_tests': ['test3', 'test4']
|
| }
|
| }
|
| },
|
| 'step2': {
|
| - 'swarming_tasks': [
|
| - {
|
| - 'status': 'Pending'
|
| + 'swarming_tasks': {
|
| + 'm/b/121': {
|
| + 'task_info': {
|
| + 'status': 'Pending'
|
| + },
|
| + 'all_tests': ['test1']
|
| }
|
| - ],
|
| - 'tests': {
|
| - 'test1': {
|
| - 'status': 'Pending'
|
| + }
|
| + },
|
| + 'step3': {
|
| + 'swarming_tasks': {
|
| + 'm/b/121': {
|
| + 'task_info': {
|
| + 'status': 'No swarming rerun found'
|
| + },
|
| + 'all_tests': ['test1']
|
| }
|
| }
|
| }
|
| }
|
| """
|
| - tasks_info = defaultdict(dict)
|
|
|
| - analysis = WfAnalysis.Get(master_name, builder_name, build_number)
|
| - if not analysis:
|
| - return tasks_info
|
| + tasks_info = defaultdict(lambda: defaultdict(lambda: defaultdict(dict)))
|
|
|
| - failure_result_map = analysis.failure_result_map
|
| - if failure_result_map:
|
| - for step_name, failure in failure_result_map.iteritems():
|
| - if isinstance(failure, dict):
|
| - # Only trigger swarming task for swarming test failures.
|
| - key_test_map = defaultdict(list)
|
| - for test_name, first_failure_key in failure.iteritems():
|
| - key_test_map[first_failure_key].append(test_name)
|
| -
|
| - tasks_info[step_name]['swarming_tasks'] = []
|
| - tasks_info[step_name]['tests'] = defaultdict(dict)
|
| - step_tasks_info = tasks_info[step_name]['swarming_tasks']
|
| - tests = tasks_info[step_name]['tests']
|
| - for key, test_names in key_test_map.iteritems():
|
| - referred_build_keys = key.split('/')
|
| - task = WfSwarmingTask.Get(*referred_build_keys, step_name=step_name)
|
| - if not task:
|
| - continue
|
| + swarming_server = waterfall_config.GetSwarmingSettings()['server_host']
|
| + for step_name, failure in failure_result_map.iteritems():
|
| + step_tasks_info = tasks_info[step_name]['swarming_tasks']
|
| + if isinstance(failure, dict):
|
| + # Only swarming test failures have swarming re-runs.
|
| + swarming_task_keys = set(failure.values())
|
| +
|
| + for key in swarming_task_keys:
|
| + task_dict = step_tasks_info[key]
|
| + referred_build_keys = key.split('/')
|
| + task = WfSwarmingTask.Get(*referred_build_keys, step_name=step_name)
|
| + if not task: # In case task got manually removed from data store.
|
| task_info = {
|
| - 'status': wf_analysis_status.SWARMING_STATUS_TO_DESCRIPTION.get(
|
| - task.status)
|
| + 'status': result_status.NO_SWARMING_TASK_FOUND
|
| }
|
| - if task.task_id:
|
| + task_dict['all_tests'] = _GetAllTestsForASwarmingTask(key, failure)
|
| + else:
|
| + task_info = {
|
| + 'status': task.status
|
| + }
|
| +
|
| + task_dict['all_tests'] = (
|
| + _GetAllTestsForASwarmingTask(key, failure)
|
| + if not (task.parameters and task.parameters.get('tests'))
|
| + else task.parameters['tests'])
|
| +
|
| + # Get the step name without platform.
|
| + # This value should have been saved in task.parameters;
|
| +          # if it was not saved, fall back to splitting step_name.
|
| + task_dict['ref_name'] = (
|
| + step_name.split()[0]
|
| + if not task.parameters or not task.parameters.get('ref_name')
|
| + else task.parameters['ref_name'])
|
| +
|
| + if task.task_id: # Swarming rerun has started.
|
| task_info['task_id'] = task.task_id
|
| task_info['task_url'] = 'https://%s/user/task/%s' % (
|
| - waterfall_config.GetSwarmingSettings()['server_host'],
|
| - task.task_id)
|
| + swarming_server, task.task_id)
|
| + if task.classified_tests:
|
| + # Swarming rerun has completed.
|
| + # Use its result to get reliable and flaky tests.
|
| +            # If the task has not completed, there is no try job yet, and the
|
| +            # result will be grouped under unclassified failures temporarily.
|
| + task_dict['reliable_tests'] = task.classified_tests.get(
|
| + 'reliable_tests', [])
|
| + task_dict['flaky_tests'] = task.classified_tests.get(
|
| + 'flaky_tests', [])
|
| +
|
| + task_dict['task_info'] = task_info
|
| + else:
|
| + step_tasks_info[failure] = {
|
| + 'task_info': {
|
| + 'status': result_status.NON_SWARMING_NO_RERUN
|
| + }
|
| + }
|
|
|
| - for test_name in test_names:
|
| - tests[test_name] = copy.deepcopy(task_info)
|
| + return tasks_info
|
|
|
| - task_info['tests'] = test_names
|
| - step_tasks_info.append(task_info)
|
|
|
| - return tasks_info
|
| +def GetSwarmingTaskInfo(master_name, builder_name, build_number):
|
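| +  """Returns swarming task info for the build, keyed by step name.
|
| +
|
| +  See _GenerateSwarmingTasksData for the format of the returned dict.
|
| +  Returns an empty dict if the build has no failure_result_map.
|
| +  """
|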
| + _, failure_result_map = _GetFailureResultMap(
|
| + master_name, builder_name, build_number)
|
| + return (
|
| + _GenerateSwarmingTasksData(failure_result_map)
|
| + if failure_result_map else {})
|
|
|
|
|
| def _GetTryJobBuildNumber(url):
|
| @@ -115,138 +147,283 @@ def _GetTryJobBuildNumber(url):
|
| return build_keys[2]
|
|
|
|
|
| -def _GetCulpritInfoForTryJobResult(try_job_key, culprits_info):
|
| +def _OrganizeTryJobResultByCulprits(try_job_culprits):
|
| + """Re-organize try job culprits by revision.
|
| +
|
| + Args:
|
| + try_job_culprits (dict): A dict of culprits for one step organized by test:
|
| + {
|
| + 'tests': {
|
| + 'a_test1': {
|
| + 'revision': 'rev1',
|
| + 'commit_position': '1',
|
| + 'review_url': 'url_1'
|
| + },
|
| + 'a_test2': {
|
| + 'revision': 'rev1',
|
| + 'commit_position': '1',
|
| + 'review_url': 'url_1'
|
| + }
|
| + }
|
| + }
|
| + Returns:
|
| +    A dict of culprits for one step organized by revision:
|
| + {
|
| + 'rev1': {
|
| + 'revision': 'rev1',
|
| + 'commit_position': '1',
|
| + 'review_url': 'url_1',
|
| + 'tests': ['a_test1', 'a_test2']
|
| + }
|
| + }
|
| + """
|
| + if not try_job_culprits or not try_job_culprits.get('tests'):
|
| + return {}
|
| +
|
| + organized_culprits = {}
|
| + for test_name, culprit in try_job_culprits['tests'].iteritems():
|
| + revision = culprit['revision']
|
| + if organized_culprits.get(revision):
|
| + organized_culprits[revision]['failed_tests'].append(test_name)
|
| + else:
|
| + organized_culprits[revision] = culprit
|
| + organized_culprits[revision]['failed_tests'] = [test_name]
|
| +
|
| + return organized_culprits
|
| +
|
| +
|
| +def _GetCulpritInfoForTryJobResultForTest(try_job_key, culprits_info):
|
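| +  """Fills in culprit info for try job entries that refer to try_job_key.
|
| +
|
| +  Updates culprits_info in place. Only test try jobs are handled here;
|
| +  compile try jobs are skipped.
|
| +  """
|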
| referred_build_keys = try_job_key.split('/')
|
| try_job = WfTryJob.Get(*referred_build_keys)
|
| - if not try_job:
|
| + if not try_job or try_job.compile_results:
|
| return
|
|
|
| - if try_job.compile_results:
|
| - try_job_result = try_job.compile_results[-1]
|
| - elif try_job.test_results:
|
| - try_job_result = try_job.test_results[-1]
|
| - else:
|
| - try_job_result = None
|
| -
|
| - additional_tests_culprit_info = {}
|
| - for culprit_info in culprits_info.values():
|
| - if culprit_info['try_job_key'] != try_job_key:
|
| - continue
|
| -
|
| - # Only include try job result for reliable tests.
|
| - # Flaky tests have been marked as 'Flaky'.
|
| - culprit_info['status'] = (
|
| - wf_analysis_status.TRY_JOB_STATUS_TO_DESCRIPTION[try_job.status]
|
| - if not culprit_info.get('status') else culprit_info['status'])
|
| -
|
| - if try_job_result and culprit_info['status'] != FLAKY:
|
| - if try_job_result.get('url'):
|
| - culprit_info['try_job_url'] = try_job_result['url']
|
| - culprit_info['try_job_build_number'] = (
|
| - _GetTryJobBuildNumber(try_job_result['url']))
|
| - if try_job_result.get('culprit'):
|
| - try_job_culprits = try_job_result['culprit']
|
| - step = culprit_info.get('step_no_platform', culprit_info['step_name'])
|
| - test = culprit_info['test_name']
|
| -
|
| - if test == 'N/A': # Only step level.
|
| - if try_job_culprits.get(step, {}).get('tests'):
|
| - # try job results has specified tests.
|
| - step_culprits = try_job_culprits[step]['tests']
|
| - for test_name, try_job_culprit in step_culprits.iteritems():
|
| - additional_test_key = '%s-%s' % (step, test_name)
|
| - additional_tests_culprit_info[additional_test_key] = {
|
| - 'step_name': step,
|
| - 'test_name': test_name,
|
| - 'try_job_key': try_job_key,
|
| - 'status': culprit_info['status'],
|
| - 'try_job_url': culprit_info['try_job_url'],
|
| - 'try_job_build_number': culprit_info['try_job_build_number'],
|
| - 'revision': try_job_culprit.get('revision'),
|
| - 'commit_position': try_job_culprit.get('commit_position'),
|
| - 'review_url': try_job_culprit.get('review_url')
|
| - }
|
| - continue
|
| + try_job_result = try_job.test_results[-1] if try_job.test_results else None
|
| +
|
| + for step_try_jobs in culprits_info.values():
|
| +    # If the try job found different culprits for different tests, split the
|
| +    # tests by culprit.
|
| + additional_tests_culprit_info = []
|
| + for try_job_info in step_try_jobs['try_jobs']:
|
| + if (try_job_key != try_job_info['try_job_key']
|
| + or try_job_info.get('status')):
|
| +        # try_job_info already has a status in any of these cases:
|
| +        # if there is no swarming task, there will never be a try job;
|
| +        # if the swarming task is not completed yet, there is no try job yet;
|
| +        # if flaky tests were found, those tests are marked as flaky and no
|
| +        # try job is triggered for them.
|
| +        continue
|
| +
|
| + try_job_info['status'] = try_job.status
|
| + if try_job_result:
|
| + # Needs to use ref_name to match step_name in try job.
|
| + ref_name = try_job_info['ref_name']
|
| + # Saves try job information.
|
| + if try_job_result.get('url'): # pragma: no cover
|
| + try_job_info['try_job_url'] = try_job_result['url']
|
| + try_job_info['try_job_build_number'] = (
|
| + _GetTryJobBuildNumber(try_job_result['url']))
|
| +
|
| + if (try_job_result.get('culprit') and
|
| + try_job_result['culprit'].get(ref_name)):
|
| + # Saves try job culprits information.
|
| +
|
| + # Uses culprits to group tests.
|
| + culprit_tests_map = _OrganizeTryJobResultByCulprits(
|
| + try_job_result['culprit'][ref_name])
|
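| +          # For example (hypothetical values): if the culprit map has
|
| +          # rev1 -> ['a_test1', 'a_test2'] and rev2 -> ['a_test3'], while
|
| +          # try_job_info['tests'] is ['a_test1', 'a_test2', 'a_test3',
|
| +          # 'a_test4'], the tests end up in three groups: two with a culprit
|
| +          # and one ('a_test4') without.
|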
| +          ungrouped_tests = try_job_info['tests']
|
| + list_of_culprits = []
|
| + for culprit_info in culprit_tests_map.values():
|
| + failed_tests = culprit_info['failed_tests']
|
| + list_of_culprits.append(culprit_info)
|
| + # Gets tests that haven't been grouped.
|
| +            ungrouped_tests = list(
|
| +                set(ungrouped_tests) ^ set(failed_tests))
|
| +            if not ungrouped_tests:
|
| + # All tests have been grouped.
|
| + break
|
| +
|
| + index_start = 1
|
| +          if ungrouped_tests:
|
| +            # There are tests that don't have try job culprits.
|
| + # Group these tests together.
|
| + # Save them in current try_job_info.
|
| +            try_job_info['tests'] = ungrouped_tests
|
| + try_job_info['culprit'] = {}
|
| + # Saves all the tests that have culprits later.
|
| + index_start = 0
|
| else:
|
| - # For historical culprit found by try job for compile,
|
| - # step name is not recorded.
|
| - culprit = try_job_culprits.get(step) or try_job_culprits
|
| - elif test in try_job_culprits.get(step, {}).get('tests'):
|
| - culprit = try_job_culprits[step]['tests'][test]
|
| - else: # pragma: no cover
|
| - continue # No culprit for test found.
|
| -
|
| - culprit_info['revision'] = culprit.get('revision')
|
| - culprit_info['commit_position'] = culprit.get('commit_position')
|
| - culprit_info['review_url'] = culprit.get('review_url')
|
| -
|
| - if additional_tests_culprit_info:
|
| - for key, test_culprit_info in additional_tests_culprit_info.iteritems():
|
| - culprits_info.pop(test_culprit_info['step_name'], None)
|
| - culprits_info[key] = test_culprit_info
|
| -
|
| -
|
| -def _UpdateTryJobCulpritUsingSwarmingTask(
|
| - step_name, failure_key_set, culprits_info):
|
| - for failure_key in failure_key_set:
|
| - build_keys = failure_key.split('/')
|
| - task = WfSwarmingTask.Get(*build_keys, step_name=step_name)
|
| - if not task:
|
| - continue
|
| - classified_tests = task.classified_tests
|
| - step_no_platform = task.parameters.get(
|
| - 'ref_name', step_name.split()[0])
|
| - for culprit_info in culprits_info.values():
|
| - if (culprit_info['try_job_key'] == failure_key and
|
| - step_name == culprit_info['step_name']):
|
| - culprit_info['step_no_platform'] = step_no_platform
|
| - if culprit_info['test_name'] in classified_tests.get('flaky_tests', []):
|
| - culprit_info['status'] = FLAKY
|
| + # Saves the first culprit in current try_job_info.
|
| + # Saves all the other culprits later.
|
| + try_job_info['culprit'] = {
|
| + 'revision': list_of_culprits[0]['revision'],
|
| + 'commit_position': list_of_culprits[0]['commit_position'],
|
| + 'review_url': list_of_culprits[0]['review_url']
|
| + }
|
| + try_job_info['tests'] = list_of_culprits[0]['failed_tests']
|
| +
|
| + for n in xrange(index_start, len(list_of_culprits)):
|
| + # Appends the rest of test groups to step_try_jobs['try_jobs'].
|
| + iterate_culprit = list_of_culprits[n]
|
| + tmp_try_job_info = copy.deepcopy(try_job_info)
|
| + tmp_try_job_info['culprit'] = {
|
| + 'revision': iterate_culprit['revision'],
|
| + 'commit_position': iterate_culprit['commit_position'],
|
| + 'review_url': iterate_culprit['review_url']
|
| + }
|
| + tmp_try_job_info['tests'] = iterate_culprit['failed_tests']
|
| + additional_tests_culprit_info.append(tmp_try_job_info)
|
| +
|
| + if additional_tests_culprit_info:
|
| + step_try_jobs['try_jobs'].extend(additional_tests_culprit_info)
|
| +
|
| +
|
| +def _UpdateTryJobInfoBasedOnSwarming(step_tasks_info, try_jobs):
|
| +  """Updates try job info in place based on the step's swarming task result.
|
| +
|
| + Args:
|
| + step_tasks_info (dict): A dict of swarming task info for this step.
|
| +      It is the per-step entry of the result from _GenerateSwarmingTasksData.
|
| +    try_jobs (list): A list of try job dicts for the step, updated in place:
|
| + [
|
| + {
|
| + 'try_job_key': 'm/b/120'
|
| + },
|
| + {
|
| + 'try_job_key': 'm/b/121'
|
| + },
|
| + ...
|
| + ]
|
| + """
|
| + additional_flakiness_list = []
|
| + for try_job in try_jobs:
|
| + try_job_key = try_job['try_job_key']
|
| + task = step_tasks_info.get('swarming_tasks', {}).get(try_job_key)
|
| +
|
| + if task['task_info']['status'] != wf_analysis_status.ANALYZED:
|
| +      # Something is wrong with the swarming task or it is not done yet,
|
| +      # so there is no try job yet (and there may never be one).
|
| + try_job['status'] = result_status.NO_TRY_JOB_REASON_MAP[
|
| + task['task_info']['status']]
|
| + try_job['tests'] = task.get('all_tests', [])
|
| + else:
|
| +      # The swarming task is completed; group tests according to its result.
|
| + try_job['ref_name'] = task['ref_name']
|
| + if task.get('reliable_tests'):
|
| + try_job['tests'] = task['reliable_tests']
|
| + if task.get('flaky_tests'):
|
| + # Split this try job into two groups: flaky group and reliable group.
|
| + flaky_try_job = copy.deepcopy(try_job)
|
| + flaky_try_job['status'] = result_status.FLAKY
|
| + flaky_try_job['tests'] = task['flaky_tests']
|
| + flaky_try_job['task_id'] = task['task_info']['task_id']
|
| + flaky_try_job['task_url'] = task['task_info']['task_url']
|
| + additional_flakiness_list.append(flaky_try_job)
|
| + elif task.get('flaky_tests'): # pragma: no cover
|
| + # All Flaky.
|
| + try_job['status'] = result_status.FLAKY
|
| + try_job['tests'] = task['flaky_tests']
|
| +
|
| + if task['task_info'].get('task_id'):
|
| + try_job['task_id'] = task['task_info']['task_id']
|
| + try_job['task_url'] = task['task_info']['task_url']
|
| +
|
| + try_jobs.extend(additional_flakiness_list)
|
| +
|
| +
|
| +def _GetAllTryJobResultsForTest(failure_result_map, tasks_info):
|
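| +  """Returns try job results for test failures, keyed by step name.
|
| +
|
| +  Each step maps to a dict like {'try_jobs': [...]}, where every entry in the
|
| +  list describes one group of tests that share a try job and, if found, a
|
| +  culprit (see _UpdateTryJobInfoBasedOnSwarming and
|
| +  _GetCulpritInfoForTryJobResultForTest).
|
| +  """
|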
| + culprits_info = defaultdict(lambda: defaultdict(list))
|
| + if not tasks_info:
|
| + return culprits_info
|
| +
|
| + try_job_keys = set()
|
| + for step_name, step_failure_result_map in failure_result_map.iteritems():
|
| + try_jobs = culprits_info[step_name]['try_jobs']
|
| +
|
| + if isinstance(step_failure_result_map, dict):
|
| + step_try_job_keys = set()
|
| + for try_job_key in step_failure_result_map.values():
|
| + if try_job_key not in step_try_job_keys:
|
| + try_job_dict = {
|
| + 'try_job_key': try_job_key
|
| + }
|
| + try_jobs.append(try_job_dict)
|
| + step_try_job_keys.add(try_job_key)
|
| + try_job_keys.update(step_try_job_keys)
|
| + else:
|
| + # Try job should only be triggered for swarming tests, because we cannot
|
| + # identify flaky tests for non-swarming tests.
|
| + try_job_dict = {
|
| + 'try_job_key': step_failure_result_map
|
| + }
|
| + try_jobs.append(try_job_dict)
|
| +
|
| + _UpdateTryJobInfoBasedOnSwarming(tasks_info[step_name], try_jobs)
|
| +
|
| + for try_job_key in try_job_keys:
|
| + _GetCulpritInfoForTryJobResultForTest(try_job_key, culprits_info)
|
| +
|
| + return culprits_info
|
| +
|
| +
|
| +def _GetTryJobResultForCompile(failure_result_map):
|
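| +  """Returns the compile try job result in the same format as for tests.
|
| +
|
| +  The returned dict maps 'compile' to {'try_jobs': [...]} with one entry
|
| +  holding the try job status, url, build number and culprit if available.
|
| +  """
|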
| + try_job_key = failure_result_map['compile']
|
| + referred_build_keys = try_job_key.split('/')
|
| + culprit_info = defaultdict(lambda: defaultdict(list))
|
| +
|
| + try_job = WfTryJob.Get(*referred_build_keys)
|
| + if not try_job or try_job.test_results:
|
| + return culprit_info
|
| +
|
| + try_job_result = (
|
| + try_job.compile_results[-1] if try_job.compile_results else None)
|
| +
|
| + compile_try_job = {
|
| + 'try_job_key': try_job_key,
|
| + 'status': try_job.status
|
| + }
|
| +
|
| + if try_job_result:
|
| + if try_job_result.get('url'):
|
| + compile_try_job['try_job_url'] = try_job_result['url']
|
| + compile_try_job['try_job_build_number'] = (
|
| + _GetTryJobBuildNumber(try_job_result['url']))
|
| + if try_job_result.get('culprit', {}).get('compile'):
|
| + compile_try_job['culprit'] = try_job_result['culprit']['compile']
|
| +
|
| + culprit_info['compile']['try_jobs'].append(compile_try_job)
|
| + return culprit_info
|
|
|
|
|
| def GetAllTryJobResults(master_name, builder_name, build_number):
|
| culprits_info = {}
|
| - try_job_keys = set()
|
| + is_test_failure = True
|
|
|
| - analysis = WfAnalysis.Get(master_name, builder_name, build_number)
|
| - if not analysis:
|
| - return culprits_info
|
| + analysis_result, failure_result_map = _GetFailureResultMap(
|
| + master_name, builder_name, build_number)
|
|
|
| - failure_result_map = analysis.failure_result_map
|
| if failure_result_map:
|
| - # failure_result_map uses step_names as keys and saves referred try_job_keys
|
| - # If non-swarming, step_name and referred_try_job_key match directly as:
|
| - # step_name: try_job_key
|
| - # If swarming, add one more layer of tests, so the format would be:
|
| - # step_name: {
|
| - # test_name1: try_job_key1,
|
| - # test_name2: try_job_key2,
|
| - # ...
|
| - # }
|
| - for step_name, step_failure_result_map in failure_result_map.iteritems():
|
| - if isinstance(step_failure_result_map, dict):
|
| - step_refering_keys = set()
|
| - for failed_test, try_job_key in step_failure_result_map.iteritems():
|
| - step_test_key = '%s-%s' % (step_name, failed_test)
|
| - culprits_info[step_test_key] = {
|
| - 'step_name': step_name,
|
| - 'test_name': failed_test,
|
| - 'try_job_key': try_job_key
|
| - }
|
| - step_refering_keys.add(try_job_key)
|
| -
|
| - _UpdateTryJobCulpritUsingSwarmingTask(
|
| - step_name, step_refering_keys, culprits_info)
|
| - try_job_keys.update(step_refering_keys)
|
| - else:
|
| - culprits_info[step_name] = {
|
| - 'step_name': step_name,
|
| - 'test_name': 'N/A',
|
| - 'try_job_key': step_failure_result_map
|
| - }
|
| - try_job_keys.add(step_failure_result_map)
|
| -
|
| - for try_job_key in try_job_keys:
|
| - _GetCulpritInfoForTryJobResult(try_job_key, culprits_info)
|
| -
|
| + for step_name in failure_result_map:
|
| + if step_name.lower() == 'compile':
|
| + is_test_failure = False
|
| + break
|
| + if is_test_failure:
|
| + tasks_info = _GenerateSwarmingTasksData(failure_result_map)
|
| + culprits_info = _GetAllTryJobResultsForTest(
|
| + failure_result_map, tasks_info)
|
| + else:
|
| + culprits_info = _GetTryJobResultForCompile(failure_result_map)
|
| + elif analysis_result:
|
| + for failure in analysis_result['failures']:
|
| + step_name = failure['step_name']
|
| + tests = []
|
| + for test in failure.get('tests', []):
|
| + tests.append(test['test_name'])
|
| +
|
| + culprits_info[step_name] = {
|
| + 'try_jobs': [
|
| + {
|
| + 'status': result_status.NO_FAILURE_RESULT_MAP,
|
| + 'tests': tests
|
| + }
|
| + ]
|
| + }
|
| return culprits_info
|
|
|