Chromium Code Reviews| Index: appengine/findit/handlers/handlers_util.py |
| diff --git a/appengine/findit/handlers/handlers_util.py b/appengine/findit/handlers/handlers_util.py |
| index 105c24ba6fb5f654f347f9682eaca4fb166c0249..a941de3bf4a81932fd89030a6695953c332e7146 100644 |
| --- a/appengine/findit/handlers/handlers_util.py |
| +++ b/appengine/findit/handlers/handlers_util.py |
| @@ -13,101 +13,146 @@ from waterfall import buildbot |
| from waterfall import waterfall_config |
| -FLAKY = 'Flaky' |
| +FLAKY = 200 |
|
stgao
2016/03/25 22:50:23
How about creating a message module for these?
FL
chanli
2016/03/25 23:22:03
Per our previous discussion, the status here are a
|
| +# Additional status for swarming tasks. |
| +NO_SWARMING_TASK_FOUND = 110 |
| +NON_SWARMING_NO_RERUN = 120 |
| -def GenerateSwarmingTasksData(master_name, builder_name, build_number): |
| +# Additional status for try jobs. |
| +NO_TRY_JOB_REASON_MAP = { |
| + NO_SWARMING_TASK_FOUND: NO_SWARMING_TASK_FOUND, |
| + NON_SWARMING_NO_RERUN: NON_SWARMING_NO_RERUN, |
| + wf_analysis_status.PENDING: 130, |
| + wf_analysis_status.ANALYZING: 140, |
| + wf_analysis_status.ERROR: 150, |
| +} |
| + |
| + |
| +def _GetFailureResultMap(master_name, builder_name, build_number): |
| + analysis = WfAnalysis.Get(master_name, builder_name, build_number) |
| + if not analysis: |
| + return None |
| + |
| + return analysis.failure_result_map |
| + |
| + |
| + |
| +def _GetAllTestsForASwarmingTask(task_key, step_failure_result_map): |
|
stgao
2016/03/25 22:50:24
nit: three empty lines above.
chanli
2016/03/25 23:22:03
Done.
|
| + all_tests = set() |
| + for test_name, test_task_key in step_failure_result_map.iteritems(): |
| + if task_key == test_task_key: |
| + all_tests.add(test_name) |
| + return list(all_tests) |
| + |
| + |
| +def _GenerateSwarmingTasksData(failure_result_map): |
| """Collects info for all related swarming tasks. |
| Returns: A dict as below: |
| { |
| 'step1': { |
| - 'swarming_tasks': [ |
| - { |
| - 'status': 'Completed', |
| - 'task_id': 'task1', |
| - 'task_url': ( |
| - 'https://chromium-swarm.appspot.com/user/task/task1'), |
| - 'tests': ['test2'] |
| - }, |
| - { |
| - 'status': 'Completed', |
| - 'task_id': 'task0', |
| - 'task_url': ( |
| - 'https://chromium-swarm.appspot.com/user/task/task0'), |
| - 'tests': ['test1'] |
| - } |
| - ], |
| - 'tests': { |
| - 'test1': { |
| - 'status': 'Completed', |
| - 'task_id': 'task0', |
| - 'task_url': ( |
| - 'https://chromium-swarm.appspot.com/user/task/task0') |
| - }, |
| - 'test2': { |
| - 'status': 'Completed', |
| - 'task_id': 'task1', |
| - 'task_url': ( |
| - 'https://chromium-swarm.appspot.com/user/task/task1') |
| + 'swarming_tasks': { |
| + 'm/b/121': { |
| + 'task_info': { |
| + 'status': 'Completed', |
| + 'task_id': 'task1', |
| + 'task_url': ('https://chromium-swarm.appspot.com/user' |
| + '/task/task1') |
| + }, |
| + 'all_tests': ['test2', 'test3', 'test4'], |
| + 'reliable_tests': ['test2'], |
| + 'flaky_tests': ['test3', 'test4'] |
| } |
| } |
| }, |
| 'step2': { |
| - 'swarming_tasks': [ |
| - { |
| - 'status': 'Pending' |
| + 'swarming_tasks': { |
| + 'm/b/121': { |
| + 'task_info': { |
| + 'status': 'Pending' |
| + }, |
| + 'all_tests': ['test1'] |
| } |
| - ], |
| - 'tests': { |
| - 'test1': { |
| - 'status': 'Pending' |
| + } |
| + }, |
| + 'step3': { |
| + 'swarming_tasks': { |
| + 'm/b/121': { |
| + 'task_info': { |
| + 'status': 'No swarming rerun found' |
| + }, |
| + 'all_tests': ['test1'] |
| } |
| } |
| } |
| } |
| """ |
| - tasks_info = defaultdict(dict) |
| + swarming_server = waterfall_config.GetSwarmingSettings()['server_host'] |
|
stgao
2016/03/25 22:50:23
This datastore read is not needed if we don't proc
chanli
2016/03/25 23:22:03
Done.
|
| - analysis = WfAnalysis.Get(master_name, builder_name, build_number) |
| - if not analysis: |
| - return tasks_info |
| + tasks_info = defaultdict(lambda: defaultdict(lambda: defaultdict(dict))) |
| - failure_result_map = analysis.failure_result_map |
| if failure_result_map: |
| for step_name, failure in failure_result_map.iteritems(): |
| + step_tasks_info = tasks_info[step_name]['swarming_tasks'] |
| if isinstance(failure, dict): |
| - # Only trigger swarming task for swarming test failures. |
| - key_test_map = defaultdict(list) |
| - for test_name, first_failure_key in failure.iteritems(): |
| - key_test_map[first_failure_key].append(test_name) |
| - |
| - tasks_info[step_name]['swarming_tasks'] = [] |
| - tasks_info[step_name]['tests'] = defaultdict(dict) |
| - step_tasks_info = tasks_info[step_name]['swarming_tasks'] |
| - tests = tasks_info[step_name]['tests'] |
| - for key, test_names in key_test_map.iteritems(): |
| + # Only swarming test failures have swarming re-runs. |
| + swarming_task_keys = set() |
| + for first_failure_key in failure.values(): |
| + swarming_task_keys.add(first_failure_key) |
|
stgao
2016/03/25 22:50:24
Maybe simplified with:
swarming_task_keys = set(f
chanli
2016/03/25 23:22:03
Done.
|
| + |
| + for key in swarming_task_keys: |
| + task_dict = step_tasks_info[key] |
| referred_build_keys = key.split('/') |
| task = WfSwarmingTask.Get(*referred_build_keys, step_name=step_name) |
| - if not task: |
| - continue |
| - task_info = { |
| - 'status': wf_analysis_status.SWARMING_STATUS_TO_DESCRIPTION.get( |
| - task.status) |
| - } |
| - if task.task_id: |
| - task_info['task_id'] = task.task_id |
| - task_info['task_url'] = 'https://%s/user/task/%s' % ( |
| - waterfall_config.GetSwarmingSettings()['server_host'], |
| - task.task_id) |
| + if not task: # In case task got manually removed from data store. |
| + task_info = { |
| + 'status': NO_SWARMING_TASK_FOUND |
| + } |
| + task_dict['all_tests'] = _GetAllTestsForASwarmingTask(key, failure) |
| + else: |
| + task_info = { |
| + 'status': task.status |
| + } |
| + |
| + task_dict['all_tests'] = ( |
| + _GetAllTestsForASwarmingTask(key, failure) |
| + if not (task.parameters and task.parameters.get('tests')) |
| + else task.parameters['tests']) |
| + |
| + # Get the step name without platform. |
| + # This value should have been saved in task.parameters; |
| + # in case of no such value saved, split the step_name. |
| + task_dict['ref_name'] = ( |
| + step_name.split()[0] |
| + if not task.parameters or not task.parameters.get('ref_name') |
| + else task.parameters['ref_name']) |
| + |
| + if task.task_id: # Swarming rerun has started. |
| + task_info['task_id'] = task.task_id |
| + task_info['task_url'] = 'https://%s/user/task/%s' % ( |
| + swarming_server, task.task_id) |
| + if task.classified_tests: # Swarming rerun has result. |
|
stgao
2016/03/25 22:50:23
This is when the Swarming task completed?
chanli
2016/03/25 23:22:03
Yes. this is derived from the result of swarming t
|
| + task_dict['reliable_tests'] = task.classified_tests.get( |
| + 'reliable_tests', []) |
| + task_dict['flaky_tests'] = task.classified_tests.get( |
| + 'flaky_tests', []) |
| + |
| + task_dict['task_info'] = task_info |
| + else: |
| + step_tasks_info[failure] = { |
| + 'task_info': { |
| + 'status': NON_SWARMING_NO_RERUN |
| + } |
| + } |
| - for test_name in test_names: |
| - tests[test_name] = copy.deepcopy(task_info) |
| + return tasks_info |
| - task_info['tests'] = test_names |
| - step_tasks_info.append(task_info) |
| - return tasks_info |
| +def GetSwarmingTaskInfo(master_name, builder_name, build_number): |
| + failure_result_map = _GetFailureResultMap( |
| + master_name, builder_name, build_number) |
|
stgao
2016/03/25 22:50:23
nit: indent
chanli
2016/03/25 23:22:03
Done.
|
| + return _GenerateSwarmingTasksData(failure_result_map) |
| def _GetTryJobBuildNumber(url): |
| @@ -115,138 +160,264 @@ def _GetTryJobBuildNumber(url): |
| return build_keys[2] |
| -def _GetCulpritInfoForTryJobResult(try_job_key, culprits_info): |
| +def _OrganizeTryJobResultByCulprits(try_job_culprits): |
| + """Re-organize try job culprits by revision. |
| + |
| + Args: |
| + try_job_culprits (dict): A dict of culprits for one step organized by test: |
| + { |
| + 'tests': { |
| + 'a_test1': { |
| + 'revision': 'rev1', |
| + 'commit_position': '1', |
| + 'review_url': 'url_1' |
| + }, |
| + 'a_test2': { |
| + 'revision': 'rev1', |
| + 'commit_position': '1', |
| + 'review_url': 'url_1' |
| + } |
| + } |
| + } |
| + Returns: |
| + A dict of culprits for one step organized by revision: |
| + { |
| + 'rev1': { |
| + 'revision': 'rev1', |
| + 'commit_position': '1', |
| + 'review_url': 'url_1', |
| + 'tests': ['a_test1', 'a_test2'] |
| + } |
| + } |
| + """ |
| + if not try_job_culprits or not try_job_culprits.get('tests'): |
| + return {} |
| + |
| + organized_culprits = {} |
| + for test_name, culprit in try_job_culprits['tests'].iteritems(): |
| + revision = culprit['revision'] |
| + if organized_culprits.get(revision): |
| + organized_culprits[revision]['failed_tests'].append(test_name) |
| + else: |
| + organized_culprits[revision] = culprit |
| + organized_culprits[revision]['failed_tests'] = [test_name] |
| + |
| + return organized_culprits |
| + |
| + |
| +def _GetCulpritInfoForTryJobResultForTest(try_job_key, culprits_info): |
| referred_build_keys = try_job_key.split('/') |
| try_job = WfTryJob.Get(*referred_build_keys) |
| - if not try_job: |
| + if not try_job or try_job.compile_results: |
| return |
| - if try_job.compile_results: |
| - try_job_result = try_job.compile_results[-1] |
| - elif try_job.test_results: |
| - try_job_result = try_job.test_results[-1] |
| - else: |
| - try_job_result = None |
| - |
| - additional_tests_culprit_info = {} |
| - for culprit_info in culprits_info.values(): |
| - if culprit_info['try_job_key'] != try_job_key: |
| - continue |
| - |
| - # Only include try job result for reliable tests. |
| - # Flaky tests have been marked as 'Flaky'. |
| - culprit_info['status'] = ( |
| - wf_analysis_status.TRY_JOB_STATUS_TO_DESCRIPTION[try_job.status] |
| - if not culprit_info.get('status') else culprit_info['status']) |
| - |
| - if try_job_result and culprit_info['status'] != FLAKY: |
| - if try_job_result.get('url'): |
| - culprit_info['try_job_url'] = try_job_result['url'] |
| - culprit_info['try_job_build_number'] = ( |
| + try_job_result = try_job.test_results[-1] if try_job.test_results else None |
| + |
| + for step_try_jobs in culprits_info.values(): |
| + # If try job found different culprits for each test, split tests by culprit. |
| + additional_tests_culprit_info = [] |
| + for try_job_info in step_try_jobs['try_jobs']: |
| + if try_job_key != try_job_info['try_job_key']: |
| + continue |
| + |
| + if try_job_info.get('status'): |
|
stgao
2016/03/25 22:50:24
"if not" instead?
chanli
2016/03/25 23:22:03
I use 'if' here to reduce one level of indentation
|
| + # The try job has been updated by swarming task, no try job yet or ever. |
|
stgao
2016/03/25 22:50:23
this comment seems not clear enough.
chanli
2016/03/25 23:22:03
Done.
|
| + continue |
| + |
| + try_job_info['status'] = try_job.status |
| + # Needs to use ref_name to match step_name in try job. |
| + ref_name = try_job_info['ref_name'] |
|
stgao
2016/03/25 22:50:23
nit: not used outside of if below.
chanli
2016/03/25 23:22:03
Done.
|
| + if try_job_result: |
| + # Saves try job information. |
| + try_job_info['try_job_url'] = try_job_result['url'] |
| + try_job_info['try_job_build_number'] = ( |
| _GetTryJobBuildNumber(try_job_result['url'])) |
| - if try_job_result.get('culprit'): |
| - try_job_culprits = try_job_result['culprit'] |
| - step = culprit_info.get('step_no_platform', culprit_info['step_name']) |
| - test = culprit_info['test_name'] |
| - |
| - if test == 'N/A': # Only step level. |
| - if try_job_culprits.get(step, {}).get('tests'): |
| - # try job results has specified tests. |
| - step_culprits = try_job_culprits[step]['tests'] |
| - for test_name, try_job_culprit in step_culprits.iteritems(): |
| - additional_test_key = '%s-%s' % (step, test_name) |
| - additional_tests_culprit_info[additional_test_key] = { |
| - 'step_name': step, |
| - 'test_name': test_name, |
| - 'try_job_key': try_job_key, |
| - 'status': culprit_info['status'], |
| - 'try_job_url': culprit_info['try_job_url'], |
| - 'try_job_build_number': culprit_info['try_job_build_number'], |
| - 'revision': try_job_culprit.get('revision'), |
| - 'commit_position': try_job_culprit.get('commit_position'), |
| - 'review_url': try_job_culprit.get('review_url') |
| - } |
| - continue |
| - else: |
| - # For historical culprit found by try job for compile, |
| - # step name is not recorded. |
| - culprit = try_job_culprits.get(step) or try_job_culprits |
| - elif test in try_job_culprits.get(step, {}).get('tests'): |
| - culprit = try_job_culprits[step]['tests'][test] |
| - else: # pragma: no cover |
| - continue # No culprit for test found. |
| - |
| - culprit_info['revision'] = culprit.get('revision') |
| - culprit_info['commit_position'] = culprit.get('commit_position') |
| - culprit_info['review_url'] = culprit.get('review_url') |
| - |
| - if additional_tests_culprit_info: |
| - for key, test_culprit_info in additional_tests_culprit_info.iteritems(): |
| - culprits_info.pop(test_culprit_info['step_name'], None) |
| - culprits_info[key] = test_culprit_info |
| - |
| - |
| -def _UpdateTryJobCulpritUsingSwarmingTask( |
| - step_name, failure_key_set, culprits_info): |
| - for failure_key in failure_key_set: |
| - build_keys = failure_key.split('/') |
| - task = WfSwarmingTask.Get(*build_keys, step_name=step_name) |
| - if not task: |
| - continue |
| - classified_tests = task.classified_tests |
| - step_no_platform = task.parameters.get( |
| - 'ref_name', step_name.split()[0]) |
| - for culprit_info in culprits_info.values(): |
| - if (culprit_info['try_job_key'] == failure_key and |
| - step_name == culprit_info['step_name']): |
| - culprit_info['step_no_platform'] = step_no_platform |
| - if culprit_info['test_name'] in classified_tests.get('flaky_tests', []): |
| - culprit_info['status'] = FLAKY |
| + if (try_job_result.get('culprit') and |
| + try_job_result['culprit'].get(ref_name)): |
| + # Saves try job culprits information. |
| + |
| + # Uses culprits to group tests. |
| + culprit_tests_map = _OrganizeTryJobResultByCulprits( |
| + try_job_result['culprit'][ref_name]) |
| + all_tests = try_job_info['tests'] |
| + list_of_culprits = [] |
| + for culprit_info in culprit_tests_map.values(): |
| + failed_tests = culprit_info['failed_tests'] |
| + list_of_culprits.append(culprit_info) |
| + # Gets leftover tests that didn't fail because of this culprit. |
| + all_tests = list(set(all_tests) ^ set(failed_tests)) |
|
stgao
2016/03/25 22:50:24
Name of ``all_tests`` seems a little misleading.
chanli
2016/03/25 23:22:03
Done.
|
| + if not all_tests: |
| + break |
|
stgao
2016/03/25 22:50:24
Why we stop here? Mind a comment?
chanli
2016/03/25 23:22:03
Done.
|
| + |
| + index_start = 1 |
| + if all_tests: |
| + # There are tests that don't have try job culprits. |
| + # Group these tests together. |
| + # Save them in current try_job_info. |
| + try_job_info['tests'] = all_tests |
| + try_job_info['culprit'] = {} |
| + # Saves all the tests that have culprits later. |
| + index_start = 0 |
| + else: |
| + # Saves the first culprit in current try_job_info. |
| + # Saves all the other culprits later. |
| + try_job_info['culprit'] = { |
| + 'revision': list_of_culprits[0]['revision'], |
| + 'commit_position': list_of_culprits[0]['commit_position'], |
| + 'review_url': list_of_culprits[0]['review_url'] |
| + } |
| + try_job_info['tests'] = list_of_culprits[0]['failed_tests'] |
| + |
| + for n in xrange(index_start, len(list_of_culprits)): |
| + iterate_culprit = list_of_culprits[n] |
| + tmp_try_job_info = copy.deepcopy(try_job_info) |
| + tmp_try_job_info['culprit'] = { |
| + 'revision': iterate_culprit['revision'], |
| + 'commit_position': iterate_culprit['commit_position'], |
| + 'review_url': iterate_culprit['review_url'] |
| + } |
| + tmp_try_job_info['tests'] = iterate_culprit['failed_tests'] |
| + additional_tests_culprit_info.append(tmp_try_job_info) |
| + |
| + if additional_tests_culprit_info: |
| + step_try_jobs['try_jobs'].extend(additional_tests_culprit_info) |
| + |
| + |
| +def _UpdateTryJobInfoBasedOnSwarming(step_tasks_info, try_jobs): |
| + """ |
| + Args: |
| + step_tasks_info (dict): A dict of swarming task info for this step. |
| + It is the result from _GenerateSwarmingTasksData. |
| + try_jobs (list): A list to save try job data for the step, format as below: |
| + [ |
| + { |
| + 'try_job_key': 'm/b/120' |
| + }, |
| + { |
| + 'try_job_key': 'm/b/121' |
| + }, |
| + ... |
| + ] |
| + """ |
| + additional_flakiness_list = [] |
| + for try_job in try_jobs: |
| + try_job_key = try_job['try_job_key'] |
| + for swarming_task_key, task in step_tasks_info.get( |
| + 'swarming_tasks', {}).iteritems(): |
| + if swarming_task_key != try_job_key: |
|
stgao
2016/03/25 22:50:23
Why we need this check? A comment to explain is ap
chanli
2016/03/25 23:22:03
Yes... There will only be one task for each step i
|
| + continue |
| + |
| + if task['task_info']['status'] != wf_analysis_status.ANALYZED: |
| + # There is something wrong with swarming task or it's not done yet, |
| + # no try job yet or ever. |
| + try_job['status'] = NO_TRY_JOB_REASON_MAP[task['task_info']['status']] |
| + try_job['tests'] = task['all_tests'] |
| + else: |
| + try_job['ref_name'] = task['ref_name'] |
| + if task.get('reliable_tests'): |
| + try_job['tests'] = task['reliable_tests'] |
| + elif task.get('flaky_tests'): # pragma: no cover |
|
stgao
2016/03/25 22:50:23
It might be possible that a step has both flaky an
chanli
2016/03/25 23:22:03
Yes. That case is handled by ln329 - ln336.
|
| + # All Flaky. |
| + try_job['status'] = FLAKY |
| + try_job['tests'] = task['flaky_tests'] |
| + |
| + if task['task_info'].get('task_id'): |
| + try_job['task_id'] = task['task_info']['task_id'] |
| + try_job['task_url'] = task['task_info']['task_url'] |
| + |
| + if task.get('flaky_tests') and task.get('reliable_tests'): |
| + # Split this try job into two groups: flaky group and reliable group. |
| + flaky_try_job = copy.deepcopy(try_job) |
| + flaky_try_job['status'] = FLAKY |
| + flaky_try_job['tests'] = task['flaky_tests'] |
| + flaky_try_job['task_id'] = task['task_info']['task_id'] |
| + flaky_try_job['task_url'] = task['task_info']['task_url'] |
| + additional_flakiness_list.append(flaky_try_job) |
| + |
| + if additional_flakiness_list: |
| + try_jobs.extend(additional_flakiness_list) |
| + |
| + |
| +def _GetAllTryJobResultsForTest(failure_result_map, tasks_info): |
| + culprits_info = defaultdict(lambda: defaultdict(list)) |
| + if not tasks_info: |
| + return culprits_info |
| -def GetAllTryJobResults(master_name, builder_name, build_number): |
| - culprits_info = {} |
| try_job_keys = set() |
| + for step_name, step_failure_result_map in failure_result_map.iteritems(): |
| + try_jobs = culprits_info[step_name]['try_jobs'] |
| - analysis = WfAnalysis.Get(master_name, builder_name, build_number) |
| - if not analysis: |
| - return culprits_info |
| - |
| - failure_result_map = analysis.failure_result_map |
| - if failure_result_map: |
| - # failure_result_map uses step_names as keys and saves referred try_job_keys |
| - # If non-swarming, step_name and referred_try_job_key match directly as: |
| - # step_name: try_job_key |
| - # If swarming, add one more layer of tests, so the format would be: |
| - # step_name: { |
| - # test_name1: try_job_key1, |
| - # test_name2: try_job_key2, |
| - # ... |
| - # } |
| - for step_name, step_failure_result_map in failure_result_map.iteritems(): |
| - if isinstance(step_failure_result_map, dict): |
| - step_refering_keys = set() |
| - for failed_test, try_job_key in step_failure_result_map.iteritems(): |
| - step_test_key = '%s-%s' % (step_name, failed_test) |
| - culprits_info[step_test_key] = { |
| - 'step_name': step_name, |
| - 'test_name': failed_test, |
| + if isinstance(step_failure_result_map, dict): |
| + for try_job_key in step_failure_result_map.values(): |
| + if try_job_key not in try_job_keys: |
| + try_job_dict = { |
| 'try_job_key': try_job_key |
| } |
| - step_refering_keys.add(try_job_key) |
| + try_jobs.append(try_job_dict) |
| + try_job_keys.add(try_job_key) |
| + else: |
| + # Try job should only be triggered for swarming tests, because we cannot |
| + # identify flaky tests for non-swarming tests. |
| + try_job_dict = { |
| + 'try_job_key': step_failure_result_map |
| + } |
| + try_jobs.append(try_job_dict) |
| - _UpdateTryJobCulpritUsingSwarmingTask( |
| - step_name, step_refering_keys, culprits_info) |
| - try_job_keys.update(step_refering_keys) |
| - else: |
| - culprits_info[step_name] = { |
| - 'step_name': step_name, |
| - 'test_name': 'N/A', |
| - 'try_job_key': step_failure_result_map |
| - } |
| - try_job_keys.add(step_failure_result_map) |
| + _UpdateTryJobInfoBasedOnSwarming(tasks_info[step_name], try_jobs) |
| - for try_job_key in try_job_keys: |
| - _GetCulpritInfoForTryJobResult(try_job_key, culprits_info) |
| + for try_job_key in try_job_keys: |
| + _GetCulpritInfoForTryJobResultForTest(try_job_key, culprits_info) |
| + |
| + return culprits_info |
| + |
| + |
| +def _GetTryJobResultForCompile(failure_result_map): |
| + try_job_key = failure_result_map['compile'] |
| + referred_build_keys = try_job_key.split('/') |
| + culprit_info = defaultdict(lambda: defaultdict(list)) |
| + |
| + try_job = WfTryJob.Get(*referred_build_keys) |
| + if not try_job or try_job.test_results: |
| + return culprit_info |
| + |
| + try_job_result = ( |
| + try_job.compile_results[-1] if try_job.compile_results else None) |
| + |
| + compile_try_job = { |
| + 'try_job_key': try_job_key, |
| + 'status': try_job.status |
| + } |
| + |
| + if try_job_result: |
| + if try_job_result.get('url'): |
| + compile_try_job['try_job_url'] = try_job_result['url'] |
| + compile_try_job['try_job_build_number'] = ( |
| + _GetTryJobBuildNumber(try_job_result['url'])) |
| + if try_job_result.get('culprit', {}).get('compile'): |
| + compile_try_job['culprit'] = try_job_result['culprit']['compile'] |
| + |
| + culprit_info['compile']['try_jobs'].append(compile_try_job) |
| + return culprit_info |
| + |
| + |
| +def GetAllTryJobResults(master_name, builder_name, build_number): |
| + culprits_info = {} |
| + is_test_failure = True |
| + |
| + failure_result_map = _GetFailureResultMap( |
| + master_name, builder_name, build_number) |
|
stgao
2016/03/25 22:50:23
nit: indent.
chanli
2016/03/25 23:22:03
Done.
|
| + |
| + if failure_result_map: |
| + for step_name in failure_result_map: |
| + if step_name == 'compile': |
| + is_test_failure = False |
| + break |
| + if is_test_failure: |
| + tasks_info = _GenerateSwarmingTasksData(failure_result_map) |
| + culprits_info = _GetAllTryJobResultsForTest( |
| + failure_result_map, tasks_info) |
| + else: |
| + culprits_info = _GetTryJobResultForCompile(failure_result_map) |
| return culprits_info |