Chromium Code Reviews

| OLD | NEW |
|---|---|
| 1 # Copyright 2016 The Chromium Authors. All rights reserved. | 1 # Copyright 2016 The Chromium Authors. All rights reserved. |
| 2 # Use of this source code is governed by a BSD-style license that can be | 2 # Use of this source code is governed by a BSD-style license that can be |
| 3 # found in the LICENSE file. | 3 # found in the LICENSE file. |
| 4 | 4 |
| 5 from collections import defaultdict | 5 from collections import defaultdict |
| 6 import copy | 6 import copy |
| 7 | 7 |
| 8 from handlers import result_status | |
| 8 from model import wf_analysis_status | 9 from model import wf_analysis_status |
| 9 from model.wf_analysis import WfAnalysis | 10 from model.wf_analysis import WfAnalysis |
| 10 from model.wf_swarming_task import WfSwarmingTask | 11 from model.wf_swarming_task import WfSwarmingTask |
| 11 from model.wf_try_job import WfTryJob | 12 from model.wf_try_job import WfTryJob |
| 12 from waterfall import buildbot | 13 from waterfall import buildbot |
| 13 from waterfall import waterfall_config | 14 from waterfall import waterfall_config |
| 14 | 15 |
| 15 | 16 |
| 16 FLAKY = 'Flaky' | 17 def _GetFailureResultMap(master_name, builder_name, build_number): |
| 18 analysis = WfAnalysis.Get(master_name, builder_name, build_number) | |
| 19 if not analysis: | |
| 20 return None | |
| 21 | |
| 22 return analysis.failure_result_map | |
| 17 | 23 |
| 18 | 24 |
| 19 def GenerateSwarmingTasksData(master_name, builder_name, build_number): | 25 def _GetAllTestsForASwarmingTask(task_key, step_failure_result_map): |
| 26 all_tests = set() | |
| 27 for test_name, test_task_key in step_failure_result_map.iteritems(): | |
| 28 if task_key == test_task_key: | |
| 29 all_tests.add(test_name) | |
| 30 return list(all_tests) | |
| 31 | |
| 32 | |
| 33 def _GenerateSwarmingTasksData(failure_result_map): | |
| 20 """Collects info for all related swarming tasks. | 34 """Collects info for all related swarming tasks. |
| 21 | 35 |
| 22 Returns: A dict as below: | 36 Returns: A dict as below: |
| 23 { | 37 { |
| 24 'step1': { | 38 'step1': { |
| 25 'swarming_tasks': [ | 39 'swarming_tasks': { |
| 26 { | 40 'm/b/121': { |
| 27 'status': 'Completed', | 41 'task_info': { |
| 28 'task_id': 'task1', | 42 'status': 'Completed', |
| 29 'task_url': ( | 43 'task_id': 'task1', |
| 30 'https://chromium-swarm.appspot.com/user/task/task1'), | 44 'task_url': ('https://chromium-swarm.appspot.com/user' |
| 31 'tests': ['test2'] | 45 '/task/task1') |
| 32 }, | 46 }, |
| 33 { | 47 'all_tests': ['test2', 'test3', 'test4'], |
| 34 'status': 'Completed', | 48 'reliable_tests': ['test2'], |
| 35 'task_id': 'task0', | 49 'flaky_tests': ['test3', 'test4'] |
| 36 'task_url': ( | |
| 37 'https://chromium-swarm.appspot.com/user/task/task0'), | |
| 38 'tests': ['test1'] | |
| 39 } | |
| 40 ], | |
| 41 'tests': { | |
| 42 'test1': { | |
| 43 'status': 'Completed', | |
| 44 'task_id': 'task0', | |
| 45 'task_url': ( | |
| 46 'https://chromium-swarm.appspot.com/user/task/task0') | |
| 47 }, | |
| 48 'test2': { | |
| 49 'status': 'Completed', | |
| 50 'task_id': 'task1', | |
| 51 'task_url': ( | |
| 52 'https://chromium-swarm.appspot.com/user/task/task1') | |
| 53 } | 50 } |
| 54 } | 51 } |
| 55 }, | 52 }, |
| 56 'step2': { | 53 'step2': { |
| 57 'swarming_tasks': [ | 54 'swarming_tasks': { |
| 58 { | 55 'm/b/121': { |
| 59 'status': 'Pending' | 56 'task_info': { |
| 57 'status': 'Pending' | |
| 58 }, | |
| 59 'all_tests': ['test1'] | |
| 60 } | 60 } |
| 61 ], | 61 } |
| 62 'tests': { | 62 }, |
| 63 'test1': { | 63 'step3': { |
| 64 'status': 'Pending' | 64 'swarming_tasks': { |
| 65 'm/b/121': { | |
| 66 'task_info': { | |
| 67 'status': 'No swarming rerun found' | |
| 68 }, | |
| 69 'all_tests': ['test1'] | |
| 65 } | 70 } |
| 66 } | 71 } |
| 67 } | 72 } |
| 68 } | 73 } |
| 69 """ | 74 """ |
| 70 tasks_info = defaultdict(dict) | |
| 71 | 75 |
| 72 analysis = WfAnalysis.Get(master_name, builder_name, build_number) | 76 tasks_info = defaultdict(lambda: defaultdict(lambda: defaultdict(dict))) |
| 73 if not analysis: | |
| 74 return tasks_info | |
| 75 | 77 |
| 76 failure_result_map = analysis.failure_result_map | |
| 77 if failure_result_map: | 78 if failure_result_map: |
| 79 swarming_server = waterfall_config.GetSwarmingSettings()['server_host'] | |
| 78 for step_name, failure in failure_result_map.iteritems(): | 80 for step_name, failure in failure_result_map.iteritems(): |
| 81 step_tasks_info = tasks_info[step_name]['swarming_tasks'] | |
| 79 if isinstance(failure, dict): | 82 if isinstance(failure, dict): |
| 80 # Only trigger swarming task for swarming test failures. | 83 # Only swarming test failures have swarming re-runs. |
| 81 key_test_map = defaultdict(list) | 84 swarming_task_keys = set(failure.values()) |
| 82 for test_name, first_failure_key in failure.iteritems(): | |
| 83 key_test_map[first_failure_key].append(test_name) | |
| 84 | 85 |
| 85 tasks_info[step_name]['swarming_tasks'] = [] | 86 for key in swarming_task_keys: |
| 86 tasks_info[step_name]['tests'] = defaultdict(dict) | 87 task_dict = step_tasks_info[key] |
| 87 step_tasks_info = tasks_info[step_name]['swarming_tasks'] | |
| 88 tests = tasks_info[step_name]['tests'] | |
| 89 for key, test_names in key_test_map.iteritems(): | |
| 90 referred_build_keys = key.split('/') | 88 referred_build_keys = key.split('/') |
| 91 task = WfSwarmingTask.Get(*referred_build_keys, step_name=step_name) | 89 task = WfSwarmingTask.Get(*referred_build_keys, step_name=step_name) |
| 92 if not task: | 90 if not task: # In case the task was manually removed from the data store. |
| 93 continue | 91 task_info = { |
| 94 task_info = { | 92 'status': result_status.NO_SWARMING_TASK_FOUND |
| 95 'status': wf_analysis_status.SWARMING_STATUS_TO_DESCRIPTION.get( | 93 } |
| 96 task.status) | 94 task_dict['all_tests'] = _GetAllTestsForASwarmingTask(key, failure) |
| 97 } | 95 else: |
| 98 if task.task_id: | 96 task_info = { |
| 99 task_info['task_id'] = task.task_id | 97 'status': task.status |
| 100 task_info['task_url'] = 'https://%s/user/task/%s' % ( | 98 } |
| 101 waterfall_config.GetSwarmingSettings()['server_host'], | |
| 102 task.task_id) | |
| 103 | 99 |
| 104 for test_name in test_names: | 100 task_dict['all_tests'] = ( |
| 105 tests[test_name] = copy.deepcopy(task_info) | 101 _GetAllTestsForASwarmingTask(key, failure) |
| 102 if not (task.parameters and task.parameters.get('tests')) | |
| 103 else task.parameters['tests']) | |
| 106 | 104 |
| 107 task_info['tests'] = test_names | 105 # Get the step name without platform. |
| 108 step_tasks_info.append(task_info) | 106 # This value should have been saved in task.parameters; |
| 107 # if no such value was saved, fall back to splitting step_name. | |
| 108 task_dict['ref_name'] = ( | |
| 109 step_name.split()[0] | |
| 110 if not task.parameters or not task.parameters.get('ref_name') | |
| 111 else task.parameters['ref_name']) | |
| 112 | |
| 113 if task.task_id: # Swarming rerun has started. | |
| 114 task_info['task_id'] = task.task_id | |
| 115 task_info['task_url'] = 'https://%s/user/task/%s' % ( | |
| 116 swarming_server, task.task_id) | |
| 117 if task.classified_tests: # Swarming rerun has result. | |
stgao 2016/03/28 22:27:22:
As the swarming task already completed here, how a

chanli 2016/03/29 01:35:47:
Done.
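For illustration, a minimal standalone sketch of the `classified_tests` handling discussed in this thread; the dict shape (`reliable_tests` / `flaky_tests` keys) is assumed from the docstring example above, not taken from the model definition:

```python
# Minimal sketch, assuming task.classified_tests is either falsy or a dict
# with optional 'reliable_tests' / 'flaky_tests' keys (shape inferred from
# the docstring example in this CL).
def split_classified_tests(classified_tests):
    # A falsy value means the rerun produced no classification yet, so both
    # groups stay empty instead of raising on .get().
    if not classified_tests:
        return [], []
    return (classified_tests.get('reliable_tests', []),
            classified_tests.get('flaky_tests', []))

print(split_classified_tests(None))                           # ([], [])
print(split_classified_tests({'flaky_tests': ['t3', 't4']}))  # ([], ['t3', 't4'])
```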
| 118 task_dict['reliable_tests'] = task.classified_tests.get( | |
| 119 'reliable_tests', []) | |
| 120 task_dict['flaky_tests'] = task.classified_tests.get( | |
| 121 'flaky_tests', []) | |
| 122 | |
| 123 task_dict['task_info'] = task_info | |
| 124 else: | |
| 125 step_tasks_info[failure] = { | |
| 126 'task_info': { | |
| 127 'status': result_status.NON_SWARMING_NO_RERUN | |
| 128 } | |
| 129 } | |
| 109 | 130 |
| 110 return tasks_info | 131 return tasks_info |
| 111 | 132 |
| 112 | 133 |
| 134 def GetSwarmingTaskInfo(master_name, builder_name, build_number): | |
| 135 failure_result_map = _GetFailureResultMap( | |
| 136 master_name, builder_name, build_number) | |
| 137 return _GenerateSwarmingTasksData(failure_result_map) | |
| 138 | |
| 139 | |
| 113 def _GetTryJobBuildNumber(url): | 140 def _GetTryJobBuildNumber(url): |
| 114 build_keys = buildbot.ParseBuildUrl(url) | 141 build_keys = buildbot.ParseBuildUrl(url) |
| 115 return build_keys[2] | 142 return build_keys[2] |
| 116 | 143 |
| 117 | 144 |
| 118 def _GetCulpritInfoForTryJobResult(try_job_key, culprits_info): | 145 def _OrganizeTryJobResultByCulprits(try_job_culprits): |
| 146 """Re-organize try job culprits by revision. | |
| 147 | |
| 148 Args: | |
| 149 try_job_culprits (dict): A dict of culprits for one step organized by test: | |
| 150 { | |
| 151 'tests': { | |
| 152 'a_test1': { | |
| 153 'revision': 'rev1', | |
| 154 'commit_position': '1', | |
| 155 'review_url': 'url_1' | |
| 156 }, | |
| 157 'a_test2': { | |
| 158 'revision': 'rev1', | |
| 159 'commit_position': '1', | |
| 160 'review_url': 'url_1' | |
| 161 } | |
| 162 } | |
| 163 } | |
| 164 Returns: | |
| 165 A dict of culprits for one step organized by revision: | |
| 166 { | |
| 167 'rev1': { | |
| 168 'revision': 'rev1', | |
| 169 'commit_position': '1', | |
| 170 'review_url': 'url_1', | |
| 171 'failed_tests': ['a_test1', 'a_test2'] | |
| 172 } | |
| 173 } | |
| 174 """ | |
| 175 if not try_job_culprits or not try_job_culprits.get('tests'): | |
| 176 return {} | |
| 177 | |
| 178 organized_culprits = {} | |
| 179 for test_name, culprit in try_job_culprits['tests'].iteritems(): | |
| 180 revision = culprit['revision'] | |
| 181 if organized_culprits.get(revision): | |
| 182 organized_culprits[revision]['failed_tests'].append(test_name) | |
| 183 else: | |
| 184 organized_culprits[revision] = culprit | |
| 185 organized_culprits[revision]['failed_tests'] = [test_name] | |
| 186 | |
| 187 return organized_culprits | |
| 188 | |
| 189 | |
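For illustration, a self-contained version of the regrouping that `_OrganizeTryJobResultByCulprits` performs, runnable without the app models (input and output shapes taken from the docstring above):

```python
def organize_by_revision(tests):
    # Group test names under the culprit revision they share; sorted() only
    # makes the output order deterministic for this demo.
    organized = {}
    for test_name, culprit in sorted(tests.items()):
        entry = organized.setdefault(culprit['revision'], dict(culprit))
        entry.setdefault('failed_tests', []).append(test_name)
    return organized

print(organize_by_revision({
    'a_test1': {'revision': 'rev1', 'commit_position': '1',
                'review_url': 'url_1'},
    'a_test2': {'revision': 'rev1', 'commit_position': '1',
                'review_url': 'url_1'},
}))
# {'rev1': {'revision': 'rev1', 'commit_position': '1',
#           'review_url': 'url_1', 'failed_tests': ['a_test1', 'a_test2']}}
```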
| 190 def _GetCulpritInfoForTryJobResultForTest(try_job_key, culprits_info): | |
| 119 referred_build_keys = try_job_key.split('/') | 191 referred_build_keys = try_job_key.split('/') |
| 120 try_job = WfTryJob.Get(*referred_build_keys) | 192 try_job = WfTryJob.Get(*referred_build_keys) |
| 121 if not try_job: | 193 if not try_job or try_job.compile_results: |
| 122 return | 194 return |
| 123 | 195 |
| 124 if try_job.compile_results: | 196 try_job_result = try_job.test_results[-1] if try_job.test_results else None |
| 125 try_job_result = try_job.compile_results[-1] | 197 |
| 126 elif try_job.test_results: | 198 for step_try_jobs in culprits_info.values(): |
| 127 try_job_result = try_job.test_results[-1] | 199 # If try job found different culprits for each test, split tests by culprit. |
| 128 else: | 200 additional_tests_culprit_info = [] |
| 129 try_job_result = None | 201 for try_job_info in step_try_jobs['try_jobs']: |
| 130 | 202 if try_job_key != try_job_info['try_job_key']: |
| 131 additional_tests_culprit_info = {} | 203 continue |
| 132 for culprit_info in culprits_info.values(): | 204 |
| 133 if culprit_info['try_job_key'] != try_job_key: | 205 if try_job_info.get('status'): |
| 134 continue | 206 # The try job has been updated by the swarming task: |
stgao 2016/03/28 22:27:22:
I don't quite understand why try job is updated by

chanli 2016/03/29 01:35:47:
I am actually saying that if try_job_info.get('sta
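Per the reply above: a `status` already present on `try_job_info` means the swarming pass resolved this group (no swarming task, task still running, or tests classified flaky), so the culprit pass skips it. A minimal sketch of that guard, with illustrative status values (the real ones live in `handlers.result_status`):

```python
# Illustrative only: real status values come from handlers.result_status.
groups = [
    {'try_job_key': 'm/b/121', 'status': 'Flaky', 'tests': ['t3', 't4']},
    {'try_job_key': 'm/b/121', 'tests': ['t2']},  # still needs try-job data
]
# Only groups without a pre-set status get try-job results attached.
pending = [g for g in groups if not g.get('status')]
print(pending)  # [{'try_job_key': 'm/b/121', 'tests': ['t2']}]
```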
| 135 | 207 # If there is no swarming task, there won't be a try job either; |
| 136 # Only include try job result for reliable tests. | 208 # If the swarming task is not completed yet, there won't be a try job yet; |
| 137 # Flaky tests have been marked as 'Flaky'. | 209 # If flaky tests are found, those tests will be marked as flaky. |
| 138 culprit_info['status'] = ( | 210 continue |
| 139 wf_analysis_status.TRY_JOB_STATUS_TO_DESCRIPTION[try_job.status] | 211 |
| 140 if not culprit_info.get('status') else culprit_info['status']) | 212 try_job_info['status'] = try_job.status |
| 141 | 213 if try_job_result: |
| 142 if try_job_result and culprit_info['status'] != FLAKY: | 214 # Needs to use ref_name to match step_name in try job. |
| 143 if try_job_result.get('url'): | 215 ref_name = try_job_info['ref_name'] |
| 144 culprit_info['try_job_url'] = try_job_result['url'] | 216 # Saves try job information. |
| 145 culprit_info['try_job_build_number'] = ( | 217 if try_job_result.get('url'): |
| 146 _GetTryJobBuildNumber(try_job_result['url'])) | 218 try_job_info['try_job_url'] = try_job_result['url'] |
| 147 if try_job_result.get('culprit'): | 219 try_job_info['try_job_build_number'] = ( |
| 148 try_job_culprits = try_job_result['culprit'] | 220 _GetTryJobBuildNumber(try_job_result['url'])) |
| 149 step = culprit_info.get('step_no_platform', culprit_info['step_name']) | 221 |
| 150 test = culprit_info['test_name'] | 222 if (try_job_result.get('culprit') and |
| 151 | 223 try_job_result['culprit'].get(ref_name)): |
| 152 if test == 'N/A': # Only step level. | 224 # Saves try job culprits information. |
| 153 if try_job_culprits.get(step, {}).get('tests'): | 225 |
| 154 # try job results has specified tests. | 226 # Uses culprits to group tests. |
| 155 step_culprits = try_job_culprits[step]['tests'] | 227 culprit_tests_map = _OrganizeTryJobResultByCulprits( |
| 156 for test_name, try_job_culprit in step_culprits.iteritems(): | 228 try_job_result['culprit'][ref_name]) |
| 157 additional_test_key = '%s-%s' % (step, test_name) | 229 ungrouped_tests = try_job_info['tests'] |
| 158 additional_tests_culprit_info[additional_test_key] = { | 230 list_of_culprits = [] |
| 159 'step_name': step, | 231 for culprit_info in culprit_tests_map.values(): |
| 160 'test_name': test_name, | 232 failed_tests = culprit_info['failed_tests'] |
| 161 'try_job_key': try_job_key, | 233 list_of_culprits.append(culprit_info) |
| 162 'status': culprit_info['status'], | 234 # Gets tests that haven't been grouped. |
| 163 'try_job_url': culprit_info['try_job_url'], | 235 ungrouped_tests = list( |
| 164 'try_job_build_number': culprit_info['try_job_build_number'], | 236 set(ungrouped_tests) ^ set(failed_tests)) |
| 165 'revision': try_job_culprit.get('revision'), | 237 if not ungrouped_tests: |
| 166 'commit_position': try_job_culprit.get('commit_position'), | 238 # All tests have been grouped. |
| 167 'review_url': try_job_culprit.get('review_url') | 239 break |
| 168 } | 240 |
| 169 continue | 241 index_start = 1 |
| 242 if ungrouped_tests: | |
| 243 # There are tests that don't have try job culprits. | |
| 244 # Group these tests together. | |
| 245 # Save them in current try_job_info. | |
| 246 try_job_info['tests'] = ungrouped_tests | |
| 247 try_job_info['culprit'] = {} | |
| 248 # Saves all the tests that have culprits later. | |
| 249 index_start = 0 | |
| 170 else: | 250 else: |
| 171 # For historical culprit found by try job for compile, | 251 # Saves the first culprit in current try_job_info. |
| 172 # step name is not recorded. | 252 # Saves all the other culprits later. |
| 173 culprit = try_job_culprits.get(step) or try_job_culprits | 253 try_job_info['culprit'] = { |
| 174 elif test in try_job_culprits.get(step, {}).get('tests'): | 254 'revision': list_of_culprits[0]['revision'], |
| 175 culprit = try_job_culprits[step]['tests'][test] | 255 'commit_position': list_of_culprits[0]['commit_position'], |
| 176 else: # pragma: no cover | 256 'review_url': list_of_culprits[0]['review_url'] |
| 177 continue # No culprit for test found. | 257 } |
| 178 | 258 try_job_info['tests'] = list_of_culprits[0]['failed_tests'] |
| 179 culprit_info['revision'] = culprit.get('revision') | 259 |
| 180 culprit_info['commit_position'] = culprit.get('commit_position') | 260 for n in xrange(index_start, len(list_of_culprits)): |
| 181 culprit_info['review_url'] = culprit.get('review_url') | 261 # Appends the rest of the test groups to step_try_jobs['try_jobs']. |
| 182 | 262 iterate_culprit = list_of_culprits[n] |
| 183 if additional_tests_culprit_info: | 263 tmp_try_job_info = copy.deepcopy(try_job_info) |
| 184 for key, test_culprit_info in additional_tests_culprit_info.iteritems(): | 264 tmp_try_job_info['culprit'] = { |
| 185 culprits_info.pop(test_culprit_info['step_name'], None) | 265 'revision': iterate_culprit['revision'], |
| 186 culprits_info[key] = test_culprit_info | 266 'commit_position': iterate_culprit['commit_position'], |
| 187 | 267 'review_url': iterate_culprit['review_url'] |
| 188 | 268 } |
| 189 def _UpdateTryJobCulpritUsingSwarmingTask( | 269 tmp_try_job_info['tests'] = iterate_culprit['failed_tests'] |
| 190 step_name, failure_key_set, culprits_info): | 270 additional_tests_culprit_info.append(tmp_try_job_info) |
| 191 for failure_key in failure_key_set: | 271 |
| 192 build_keys = failure_key.split('/') | 272 if additional_tests_culprit_info: |
| 193 task = WfSwarmingTask.Get(*build_keys, step_name=step_name) | 273 step_try_jobs['try_jobs'].extend(additional_tests_culprit_info) |
| 194 if not task: | 274 |
| 195 continue | 275 |
| 196 classified_tests = task.classified_tests | 276 def _UpdateTryJobInfoBasedOnSwarming(step_tasks_info, try_jobs): |
| 197 step_no_platform = task.parameters.get( | 277 """ |
| 198 'ref_name', step_name.split()[0]) | 278 Args: |
| 199 for culprit_info in culprits_info.values(): | 279 step_tasks_info (dict): A dict of swarming task info for this step. |
| 200 if (culprit_info['try_job_key'] == failure_key and | 280 It is the result from _GenerateSwarmingTasksData. |
| 201 step_name == culprit_info['step_name']): | 281 try_jobs (list): A list to save try job data for the step, format as below: |
| 202 culprit_info['step_no_platform'] = step_no_platform | 282 [ |
| 203 if culprit_info['test_name'] in classified_tests.get('flaky_tests', []): | 283 { |
| 204 culprit_info['status'] = FLAKY | 284 'try_job_key': 'm/b/120' |
| 285 }, | |
| 286 { | |
| 287 'try_job_key': 'm/b/121' | |
| 288 }, | |
| 289 ... | |
| 290 ] | |
| 291 """ | |
| 292 additional_flakiness_list = [] | |
| 293 for try_job in try_jobs: | |
| 294 try_job_key = try_job['try_job_key'] | |
| 295 task = step_tasks_info.get('swarming_tasks', {}).get(try_job_key) | |
| 296 | |
| 297 if task['task_info']['status'] != wf_analysis_status.ANALYZED: | |
| 298 # There is something wrong with the swarming task or it's not done yet, | |
| 299 # no try job yet or ever. | |
| 300 try_job['status'] = result_status.NO_TRY_JOB_REASON_MAP[ | |
| 301 task['task_info']['status']] | |
| 302 try_job['tests'] = task.get('all_tests', []) | |
| 303 else: | |
| 304 # Swarming task is completed; group tests according to the task result. | |
| 305 try_job['ref_name'] = task['ref_name'] | |
| 306 if task.get('reliable_tests'): | |
| 307 try_job['tests'] = task['reliable_tests'] | |
| 308 if task.get('flaky_tests'): | |
| 309 # Split this try job into two groups: flaky group and reliable group. | |
| 310 flaky_try_job = copy.deepcopy(try_job) | |
| 311 flaky_try_job['status'] = result_status.FLAKY | |
| 312 flaky_try_job['tests'] = task['flaky_tests'] | |
| 313 flaky_try_job['task_id'] = task['task_info']['task_id'] | |
| 314 flaky_try_job['task_url'] = task['task_info']['task_url'] | |
| 315 additional_flakiness_list.append(flaky_try_job) | |
| 316 elif task.get('flaky_tests'): # pragma: no cover | |
| 317 # All Flaky. | |
| 318 try_job['status'] = result_status.FLAKY | |
| 319 try_job['tests'] = task['flaky_tests'] | |
| 320 | |
| 321 if task['task_info'].get('task_id'): | |
| 322 try_job['task_id'] = task['task_info']['task_id'] | |
| 323 try_job['task_url'] = task['task_info']['task_url'] | |
| 324 | |
| 325 if additional_flakiness_list: | |
| 326 try_jobs.extend(additional_flakiness_list) | |
| 327 | |
| 328 | |
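To make the flaky/reliable split above concrete, a sketch of how one try-job entry becomes two groups when the rerun found both kinds of tests. Values are illustrative, and `result_status.FLAKY` is assumed here to render as a plain string:

```python
import copy

# Illustrative task data, shaped after the docstrings in this CL.
task = {
    'reliable_tests': ['t2'],
    'flaky_tests': ['t3', 't4'],
    'task_info': {'task_id': 'task1',
                  'task_url': 'https://example.test/task1'},
}
try_job = {'try_job_key': 'm/b/121', 'tests': task['reliable_tests']}

# The reliable group keeps the original entry; the flaky group is a deep
# copy stamped with the flaky status and the swarming task reference.
flaky_group = copy.deepcopy(try_job)
flaky_group.update(status='Flaky',  # stands in for result_status.FLAKY
                   tests=task['flaky_tests'],
                   task_id=task['task_info']['task_id'],
                   task_url=task['task_info']['task_url'])
print([try_job, flaky_group])
```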
| 329 def _GetAllTryJobResultsForTest(failure_result_map, tasks_info): | |
| 330 culprits_info = defaultdict(lambda: defaultdict(list)) | |
| 331 if not tasks_info: | |
| 332 return culprits_info | |
| 333 | |
| 334 try_job_keys = set() | |
| 335 for step_name, step_failure_result_map in failure_result_map.iteritems(): | |
| 336 try_jobs = culprits_info[step_name]['try_jobs'] | |
| 337 | |
| 338 if isinstance(step_failure_result_map, dict): | |
| 339 for try_job_key in step_failure_result_map.values(): | |
| 340 if try_job_key not in try_job_keys: | |
| 341 try_job_dict = { | |
| 342 'try_job_key': try_job_key | |
| 343 } | |
| 344 try_jobs.append(try_job_dict) | |
| 345 try_job_keys.add(try_job_key) | |
| 346 else: | |
| 347 # Try job should only be triggered for swarming tests, because we cannot | |
| 348 # identify flaky tests for non-swarming tests. | |
| 349 try_job_dict = { | |
| 350 'try_job_key': step_failure_result_map | |
| 351 } | |
| 352 try_jobs.append(try_job_dict) | |
| 353 | |
| 354 _UpdateTryJobInfoBasedOnSwarming(tasks_info[step_name], try_jobs) | |
| 355 | |
| 356 for try_job_key in try_job_keys: | |
| 357 _GetCulpritInfoForTryJobResultForTest(try_job_key, culprits_info) | |
| 358 | |
| 359 return culprits_info | |
| 360 | |
| 361 | |
| 362 def _GetTryJobResultForCompile(failure_result_map): | |
| 363 try_job_key = failure_result_map['compile'] | |
| 364 referred_build_keys = try_job_key.split('/') | |
| 365 culprit_info = defaultdict(lambda: defaultdict(list)) | |
| 366 | |
| 367 try_job = WfTryJob.Get(*referred_build_keys) | |
| 368 if not try_job or try_job.test_results: | |
| 369 return culprit_info | |
| 370 | |
| 371 try_job_result = ( | |
| 372 try_job.compile_results[-1] if try_job.compile_results else None) | |
| 373 | |
| 374 compile_try_job = { | |
| 375 'try_job_key': try_job_key, | |
| 376 'status': try_job.status | |
| 377 } | |
| 378 | |
| 379 if try_job_result: | |
| 380 if try_job_result.get('url'): | |
| 381 compile_try_job['try_job_url'] = try_job_result['url'] | |
| 382 compile_try_job['try_job_build_number'] = ( | |
| 383 _GetTryJobBuildNumber(try_job_result['url'])) | |
| 384 if try_job_result.get('culprit', {}).get('compile'): | |
| 385 compile_try_job['culprit'] = try_job_result['culprit']['compile'] | |
| 386 | |
| 387 culprit_info['compile']['try_jobs'].append(compile_try_job) | |
| 388 return culprit_info | |
| 205 | 389 |
| 206 | 390 |
| 207 def GetAllTryJobResults(master_name, builder_name, build_number): | 391 def GetAllTryJobResults(master_name, builder_name, build_number): |
| 208 culprits_info = {} | 392 culprits_info = {} |
| 209 try_job_keys = set() | 393 is_test_failure = True |
| 210 | 394 |
| 211 analysis = WfAnalysis.Get(master_name, builder_name, build_number) | 395 failure_result_map = _GetFailureResultMap( |
| 212 if not analysis: | 396 master_name, builder_name, build_number) |
| 213 return culprits_info | 397 |
| 214 | |
| 215 failure_result_map = analysis.failure_result_map | |
| 216 if failure_result_map: | 398 if failure_result_map: |
| 217 # failure_result_map uses step_names as keys and saves referred try_job_keys | 399 for step_name in failure_result_map: |
| 218 # If non-swarming, step_name and referred_try_job_key match directly as: | 400 if step_name == 'compile': |
| 219 # step_name: try_job_key | 401 is_test_failure = False |
| 220 # If swarming, add one more layer of tests, so the format would be: | 402 break |
| 221 # step_name: { | 403 if is_test_failure: |
| 222 # test_name1: try_job_key1, | 404 tasks_info = _GenerateSwarmingTasksData(failure_result_map) |
| 223 # test_name2: try_job_key2, | 405 culprits_info = _GetAllTryJobResultsForTest( |
| 224 # ... | 406 failure_result_map, tasks_info) |
| 225 # } | 407 else: |
| 226 for step_name, step_failure_result_map in failure_result_map.iteritems(): | 408 culprits_info = _GetTryJobResultForCompile(failure_result_map) |
| 227 if isinstance(step_failure_result_map, dict): | |
| 228 step_refering_keys = set() | |
| 229 for failed_test, try_job_key in step_failure_result_map.iteritems(): | |
| 230 step_test_key = '%s-%s' % (step_name, failed_test) | |
| 231 culprits_info[step_test_key] = { | |
| 232 'step_name': step_name, | |
| 233 'test_name': failed_test, | |
| 234 'try_job_key': try_job_key | |
| 235 } | |
| 236 step_refering_keys.add(try_job_key) | |
| 237 | |
| 238 _UpdateTryJobCulpritUsingSwarmingTask( | |
| 239 step_name, step_refering_keys, culprits_info) | |
| 240 try_job_keys.update(step_refering_keys) | |
| 241 else: | |
| 242 culprits_info[step_name] = { | |
| 243 'step_name': step_name, | |
| 244 'test_name': 'N/A', | |
| 245 'try_job_key': step_failure_result_map | |
| 246 } | |
| 247 try_job_keys.add(step_failure_result_map) | |
| 248 | |
| 249 for try_job_key in try_job_keys: | |
| 250 _GetCulpritInfoForTryJobResult(try_job_key, culprits_info) | |
| 251 | 409 |
| 252 return culprits_info | 410 return culprits_info |
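Finally, a hedged sketch of what a caller of the new `GetAllTryJobResults` sees for a test failure. The literal below is illustrative, assembled from the docstring shapes in this CL rather than actual output:

```python
# Illustrative result dict, shaped after the docstrings above; a real call
# would need the WfAnalysis/WfSwarmingTask/WfTryJob entities in datastore.
culprits_info = {
    'step1': {'try_jobs': [
        {'try_job_key': 'm/b/121', 'tests': ['t2'],
         'culprit': {'revision': 'rev1', 'commit_position': '1',
                     'review_url': 'url_1'}},
        {'try_job_key': 'm/b/121', 'status': 'Flaky', 'tests': ['t3', 't4']},
    ]},
}
for step_name, step_info in culprits_info.items():
    for group in step_info['try_jobs']:
        print('%s %s %s' % (step_name, group.get('status'),
                            group.get('tests')))
```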