Chromium Code Reviews| OLD | NEW |
|---|---|
| 1 # Copyright 2016 The Chromium Authors. All rights reserved. | 1 # Copyright 2016 The Chromium Authors. All rights reserved. |
| 2 # Use of this source code is governed by a BSD-style license that can be | 2 # Use of this source code is governed by a BSD-style license that can be |
| 3 # found in the LICENSE file. | 3 # found in the LICENSE file. |
| 4 | 4 |
| 5 from collections import defaultdict | 5 from collections import defaultdict |
| 6 import copy | 6 import copy |
| 7 | 7 |
| 8 from handlers import result_status | |
| 8 from model import wf_analysis_status | 9 from model import wf_analysis_status |
| 9 from model.wf_analysis import WfAnalysis | 10 from model.wf_analysis import WfAnalysis |
| 10 from model.wf_swarming_task import WfSwarmingTask | 11 from model.wf_swarming_task import WfSwarmingTask |
| 11 from model.wf_try_job import WfTryJob | 12 from model.wf_try_job import WfTryJob |
| 12 from waterfall import buildbot | 13 from waterfall import buildbot |
| 13 from waterfall import waterfall_config | 14 from waterfall import waterfall_config |
| 14 | 15 |
| 15 | 16 |
| 16 FLAKY = 'Flaky' | 17 def _GetFailureResultMap(master_name, builder_name, build_number): |
| 18 analysis = WfAnalysis.Get(master_name, builder_name, build_number) | |
| 19 if not analysis: | |
| 20 return None, None | |
| 21 | |
| 22 return analysis.result, analysis.failure_result_map | |
| 17 | 23 |
| 18 | 24 |
| 19 def GenerateSwarmingTasksData(master_name, builder_name, build_number): | 25 def _GetAllTestsForASwarmingTask(task_key, step_failure_result_map): |
| 26 all_tests = set() | |
| 27 for test_name, test_task_key in step_failure_result_map.iteritems(): | |
| 28 if task_key == test_task_key: | |
| 29 all_tests.add(test_name) | |
| 30 return list(all_tests) | |
| 31 | |
| 32 | |
| 33 def _GenerateSwarmingTasksData(failure_result_map): | |
| 20 """Collects info for all related swarming tasks. | 34 """Collects info for all related swarming tasks. |
| 21 | 35 |
| 22 Returns: A dict as below: | 36 Returns: A dict as below: |
| 23 { | 37 { |
| 24 'step1': { | 38 'step1': { |
| 25 'swarming_tasks': [ | 39 'swarming_tasks': { |
| 26 { | 40 'm/b/121': { |
| 27 'status': 'Completed', | 41 'task_info': { |
| 28 'task_id': 'task1', | 42 'status': 'Completed', |
| 29 'task_url': ( | 43 'task_id': 'task1', |
| 30 'https://chromium-swarm.appspot.com/user/task/task1'), | 44 'task_url': ('https://chromium-swarm.appspot.com/user' |
| 31 'tests': ['test2'] | 45 '/task/task1') |
| 32 }, | 46 }, |
| 33 { | 47 'all_tests': ['test2', 'test3', 'test4'], |
| 34 'status': 'Completed', | 48 'reliable_tests': ['test2'], |
| 35 'task_id': 'task0', | 49 'flaky_tests': ['test3', 'test4'] |
| 36 'task_url': ( | |
| 37 'https://chromium-swarm.appspot.com/user/task/task0'), | |
| 38 'tests': ['test1'] | |
| 39 } | |
| 40 ], | |
| 41 'tests': { | |
| 42 'test1': { | |
| 43 'status': 'Completed', | |
| 44 'task_id': 'task0', | |
| 45 'task_url': ( | |
| 46 'https://chromium-swarm.appspot.com/user/task/task0') | |
| 47 }, | |
| 48 'test2': { | |
| 49 'status': 'Completed', | |
| 50 'task_id': 'task1', | |
| 51 'task_url': ( | |
| 52 'https://chromium-swarm.appspot.com/user/task/task1') | |
| 53 } | 50 } |
| 54 } | 51 } |
| 55 }, | 52 }, |
| 56 'step2': { | 53 'step2': { |
| 57 'swarming_tasks': [ | 54 'swarming_tasks': { |
| 58 { | 55 'm/b/121': { |
| 59 'status': 'Pending' | 56 'task_info': { |
| 57 'status': 'Pending' | |
| 58 }, | |
| 59 'all_tests': ['test1'] | |
| 60 } | 60 } |
| 61 ], | 61 } |
| 62 'tests': { | 62 }, |
| 63 'test1': { | 63 'step3': { |
| 64 'status': 'Pending' | 64 'swarming_tasks': { |
| 65 'm/b/121': { | |
| 66 'task_info': { | |
| 67 'status': 'No swarming rerun found' | |
| 68 }, | |
| 69 'all_tests': ['test1'] | |
| 65 } | 70 } |
| 66 } | 71 } |
| 67 } | 72 } |
| 68 } | 73 } |
| 69 """ | 74 """ |
| 70 tasks_info = defaultdict(dict) | |
| 71 | 75 |
| 72 analysis = WfAnalysis.Get(master_name, builder_name, build_number) | 76 tasks_info = defaultdict(lambda: defaultdict(lambda: defaultdict(dict))) |
| 73 if not analysis: | |
| 74 return tasks_info | |
| 75 | 77 |
| 76 failure_result_map = analysis.failure_result_map | 78 swarming_server = waterfall_config.GetSwarmingSettings()['server_host'] |
| 77 if failure_result_map: | 79 for step_name, failure in failure_result_map.iteritems(): |
| 78 for step_name, failure in failure_result_map.iteritems(): | 80 step_tasks_info = tasks_info[step_name]['swarming_tasks'] |
| 79 if isinstance(failure, dict): | 81 if isinstance(failure, dict): |
| 80 # Only trigger swarming task for swarming test failures. | 82 # Only swarming test failures have swarming re-runs. |
| 81 key_test_map = defaultdict(list) | 83 swarming_task_keys = set(failure.values()) |
| 82 for test_name, first_failure_key in failure.iteritems(): | |
| 83 key_test_map[first_failure_key].append(test_name) | |
| 84 | 84 |
| 85 tasks_info[step_name]['swarming_tasks'] = [] | 85 for key in swarming_task_keys: |
| 86 tasks_info[step_name]['tests'] = defaultdict(dict) | 86 task_dict = step_tasks_info[key] |
| 87 step_tasks_info = tasks_info[step_name]['swarming_tasks'] | 87 referred_build_keys = key.split('/') |
| 88 tests = tasks_info[step_name]['tests'] | 88 task = WfSwarmingTask.Get(*referred_build_keys, step_name=step_name) |
| 89 for key, test_names in key_test_map.iteritems(): | 89 if not task: # In case task got manually removed from data store. |
| 90 referred_build_keys = key.split('/') | |
| 91 task = WfSwarmingTask.Get(*referred_build_keys, step_name=step_name) | |
| 92 if not task: | |
| 93 continue | |
| 94 task_info = { | 90 task_info = { |
| 95 'status': wf_analysis_status.SWARMING_STATUS_TO_DESCRIPTION.get( | 91 'status': result_status.NO_SWARMING_TASK_FOUND |
| 96 task.status) | |
| 97 } | 92 } |
| 98 if task.task_id: | 93 task_dict['all_tests'] = _GetAllTestsForASwarmingTask(key, failure) |
| 94 else: | |
| 95 task_info = { | |
| 96 'status': task.status | |
| 97 } | |
| 98 | |
| 99 task_dict['all_tests'] = ( | |
| 100 _GetAllTestsForASwarmingTask(key, failure) | |
| 101 if not (task.parameters and task.parameters.get('tests')) | |
| 102 else task.parameters['tests']) | |
| 103 | |
| 104 # Get the step name without platform. | |
| 105 # This value should have been saved in task.parameters; | |
| 106 # in case of no such value saved, split the step_name. | |
| 107 task_dict['ref_name'] = ( | |
| 108 step_name.split()[0] | |
| 109 if not task.parameters or not task.parameters.get('ref_name') | |
| 110 else task.parameters['ref_name']) | |
| 111 | |
| 112 if task.task_id: # Swarming rerun has started. | |
| 99 task_info['task_id'] = task.task_id | 113 task_info['task_id'] = task.task_id |
| 100 task_info['task_url'] = 'https://%s/user/task/%s' % ( | 114 task_info['task_url'] = 'https://%s/user/task/%s' % ( |
| 101 waterfall_config.GetSwarmingSettings()['server_host'], | 115 swarming_server, task.task_id) |
| 102 task.task_id) | 116 if task.classified_tests: |
| 117 # Swarming rerun has completed. | |
| 118 # Use its result to get reliable and flaky tests. | |
| 119 # If task has not completed, there will be no try job yet, | |
| 120 # the result will be grouped in unclassified failures temporarily. | |
| 121 task_dict['reliable_tests'] = task.classified_tests.get( | |
| 122 'reliable_tests', []) | |
| 123 task_dict['flaky_tests'] = task.classified_tests.get( | |
| 124 'flaky_tests', []) | |
| 103 | 125 |
| 104 for test_name in test_names: | 126 task_dict['task_info'] = task_info |
| 105 tests[test_name] = copy.deepcopy(task_info) | 127 else: |
| 106 | 128 step_tasks_info[failure] = { |
| 107 task_info['tests'] = test_names | 129 'task_info': { |
| 108 step_tasks_info.append(task_info) | 130 'status': result_status.NON_SWARMING_NO_RERUN |
| 131 } | |
| 132 } | |
| 109 | 133 |
| 110 return tasks_info | 134 return tasks_info |
| 111 | 135 |
| 112 | 136 |
| 137 def GetSwarmingTaskInfo(master_name, builder_name, build_number): | |
| 138 _, failure_result_map = _GetFailureResultMap( | |
| 139 master_name, builder_name, build_number) | |
| 140 return ( | |
| 141 _GenerateSwarmingTasksData(failure_result_map) | |
| 142 if failure_result_map else {}) | |
| 143 | |
| 144 | |
| 113 def _GetTryJobBuildNumber(url): | 145 def _GetTryJobBuildNumber(url): |
| 114 build_keys = buildbot.ParseBuildUrl(url) | 146 build_keys = buildbot.ParseBuildUrl(url) |
| 115 return build_keys[2] | 147 return build_keys[2] |
| 116 | 148 |
| 117 | 149 |
| 118 def _GetCulpritInfoForTryJobResult(try_job_key, culprits_info): | 150 def _OrganizeTryJobResultByCulprits(try_job_culprits): |
| 151 """Re-organize try job culprits by revision. | |
| 152 | |
| 153 Args: | |
| 154 try_job_culprits (dict): A dict of culprits for one step organized by test: | |
| 155 { | |
| 156 'tests': { | |
| 157 'a_test1': { | |
| 158 'revision': 'rev1', | |
| 159 'commit_position': '1', | |
| 160 'review_url': 'url_1' | |
| 161 }, | |
| 162 'a_test2': { | |
| 163 'revision': 'rev1', | |
| 164 'commit_position': '1', | |
| 165 'review_url': 'url_1' | |
| 166 } | |
| 167 } | |
| 168 } | |
| 169 Returns: | |
| 170 A dict of culprits for one step organized by revision: | |
| 171 { | |
| 172 'rev1': { | |
| 173 'revision': 'rev1', | |
| 174 'commit_position': '1', | |
| 175 'review_url': 'url_1', | |
| 176 'tests': ['a_test1', 'a_test2'] | |
| 177 } | |
| 178 } | |
| 179 """ | |
| 180 if not try_job_culprits or not try_job_culprits.get('tests'): | |
| 181 return {} | |
| 182 | |
| 183 organized_culprits = {} | |
| 184 for test_name, culprit in try_job_culprits['tests'].iteritems(): | |
| 185 revision = culprit['revision'] | |
| 186 if organized_culprits.get(revision): | |
| 187 organized_culprits[revision]['failed_tests'].append(test_name) | |
| 188 else: | |
| 189 organized_culprits[revision] = culprit | |
| 190 organized_culprits[revision]['failed_tests'] = [test_name] | |
| 191 | |
| 192 return organized_culprits | |
| 193 | |
| 194 | |
| 195 def _GetCulpritInfoForTryJobResultForTest(try_job_key, culprits_info): | |
| 119 referred_build_keys = try_job_key.split('/') | 196 referred_build_keys = try_job_key.split('/') |
| 120 try_job = WfTryJob.Get(*referred_build_keys) | 197 try_job = WfTryJob.Get(*referred_build_keys) |
| 121 if not try_job: | 198 if not try_job or try_job.compile_results: |
| 122 return | 199 return |
| 123 | 200 |
| 124 if try_job.compile_results: | 201 try_job_result = try_job.test_results[-1] if try_job.test_results else None |
| 125 try_job_result = try_job.compile_results[-1] | 202 |
| 126 elif try_job.test_results: | 203 for step_try_jobs in culprits_info.values(): |
| 127 try_job_result = try_job.test_results[-1] | 204 # If try job found different culprits for each test, split tests by culprit. |
| 128 else: | 205 additional_tests_culprit_info = [] |
| 129 try_job_result = None | 206 for try_job_info in step_try_jobs['try_jobs']: |
| 130 | 207 if (try_job_key != try_job_info['try_job_key'] |
| 131 additional_tests_culprit_info = {} | 208 or try_job_info.get('status')): |
| 132 for culprit_info in culprits_info.values(): | 209 # Conditions that try_job_info has status are: |
| 133 if culprit_info['try_job_key'] != try_job_key: | 210 # If there is no swarming task, there won't be try job; |
| 134 continue | 211 # If the swarming task is not completed yet, there won't be try job yet; |
| 135 | 212 # If there are flaky tests found, those tests will be marked as flaky, |
| 136 # Only include try job result for reliable tests. | 213 # and no try job for them will be triggered. |
| 137 # Flaky tests have been marked as 'Flaky'. | 214 continue |
| 138 culprit_info['status'] = ( | 215 |
| 139 wf_analysis_status.TRY_JOB_STATUS_TO_DESCRIPTION[try_job.status] | 216 try_job_info['status'] = try_job.status |
| 140 if not culprit_info.get('status') else culprit_info['status']) | 217 if try_job_result: |
| 141 | 218 # Needs to use ref_name to match step_name in try job. |
| 142 if try_job_result and culprit_info['status'] != FLAKY: | 219 ref_name = try_job_info['ref_name'] |
| 143 if try_job_result.get('url'): | 220 # Saves try job information. |
| 144 culprit_info['try_job_url'] = try_job_result['url'] | 221 if try_job_result.get('url'): |
| 145 culprit_info['try_job_build_number'] = ( | 222 try_job_info['try_job_url'] = try_job_result['url'] |
| 146 _GetTryJobBuildNumber(try_job_result['url'])) | 223 try_job_info['try_job_build_number'] = ( |
| 147 if try_job_result.get('culprit'): | 224 _GetTryJobBuildNumber(try_job_result['url'])) |
| 148 try_job_culprits = try_job_result['culprit'] | 225 |
| 149 step = culprit_info.get('step_no_platform', culprit_info['step_name']) | 226 if (try_job_result.get('culprit') and |
| 150 test = culprit_info['test_name'] | 227 try_job_result['culprit'].get(ref_name)): |
| 151 | 228 # Saves try job culprits information. |
| 152 if test == 'N/A': # Only step level. | 229 |
| 153 if try_job_culprits.get(step, {}).get('tests'): | 230 # Uses culprits to group tests. |
| 154 # try job results has specified tests. | 231 culprit_tests_map = _OrganizeTryJobResultByCulprits( |
| 155 step_culprits = try_job_culprits[step]['tests'] | 232 try_job_result['culprit'][ref_name]) |
| 156 for test_name, try_job_culprit in step_culprits.iteritems(): | 233 unrgouped_tests = try_job_info['tests'] |
| 157 additional_test_key = '%s-%s' % (step, test_name) | 234 list_of_culprits = [] |
| 158 additional_tests_culprit_info[additional_test_key] = { | 235 for culprit_info in culprit_tests_map.values(): |
| 159 'step_name': step, | 236 failed_tests = culprit_info['failed_tests'] |
| 160 'test_name': test_name, | 237 list_of_culprits.append(culprit_info) |
| 161 'try_job_key': try_job_key, | 238 # Gets tests that haven't been grouped. |
| 162 'status': culprit_info['status'], | 239 unrgouped_tests = list( |
| 163 'try_job_url': culprit_info['try_job_url'], | 240 set(unrgouped_tests) ^ set(failed_tests)) |
| 164 'try_job_build_number': culprit_info['try_job_build_number'], | 241 if not unrgouped_tests: |
| 165 'revision': try_job_culprit.get('revision'), | 242 # All tests have been grouped. |
| 166 'commit_position': try_job_culprit.get('commit_position'), | 243 break |
| 167 'review_url': try_job_culprit.get('review_url') | 244 |
| 168 } | 245 index_start = 1 |
| 169 continue | 246 if unrgouped_tests: |
| 247 # There are tests that don't have try job culprits. | |
| 248 # Group these tests together. | |
| 249 # Save them in current try_job_info. | |
| 250 try_job_info['tests'] = unrgouped_tests | |
| 251 try_job_info['culprit'] = {} | |
| 252 # Saves all the tests that have culprits later. | |
| 253 index_start = 0 | |
| 170 else: | 254 else: |
| 171 # For historical culprit found by try job for compile, | 255 # Saves the first culprit in current try_job_info. |
| 172 # step name is not recorded. | 256 # Saves all the other culprits later. |
| 173 culprit = try_job_culprits.get(step) or try_job_culprits | 257 try_job_info['culprit'] = { |
| 174 elif test in try_job_culprits.get(step, {}).get('tests'): | 258 'revision': list_of_culprits[0]['revision'], |
| 175 culprit = try_job_culprits[step]['tests'][test] | 259 'commit_position': list_of_culprits[0]['commit_position'], |
| 176 else: # pragma: no cover | 260 'review_url': list_of_culprits[0]['review_url'] |
| 177 continue # No culprit for test found. | 261 } |
| 178 | 262 try_job_info['tests'] = list_of_culprits[0]['failed_tests'] |
| 179 culprit_info['revision'] = culprit.get('revision') | 263 |
| 180 culprit_info['commit_position'] = culprit.get('commit_position') | 264 for n in xrange(index_start, len(list_of_culprits)): |
| 181 culprit_info['review_url'] = culprit.get('review_url') | 265 # Appends the rest of test groups to step_try_jobs['try_jobs']. |
| 182 | 266 iterate_culprit = list_of_culprits[n] |
| 183 if additional_tests_culprit_info: | 267 tmp_try_job_info = copy.deepcopy(try_job_info) |
| 184 for key, test_culprit_info in additional_tests_culprit_info.iteritems(): | 268 tmp_try_job_info['culprit'] = { |
| 185 culprits_info.pop(test_culprit_info['step_name'], None) | 269 'revision': iterate_culprit['revision'], |
| 186 culprits_info[key] = test_culprit_info | 270 'commit_position': iterate_culprit['commit_position'], |
| 187 | 271 'review_url': iterate_culprit['review_url'] |
| 188 | 272 } |
| 189 def _UpdateTryJobCulpritUsingSwarmingTask( | 273 tmp_try_job_info['tests'] = iterate_culprit['failed_tests'] |
| 190 step_name, failure_key_set, culprits_info): | 274 additional_tests_culprit_info.append(tmp_try_job_info) |
| 191 for failure_key in failure_key_set: | 275 |
| 192 build_keys = failure_key.split('/') | 276 if additional_tests_culprit_info: |
| 193 task = WfSwarmingTask.Get(*build_keys, step_name=step_name) | 277 step_try_jobs['try_jobs'].extend(additional_tests_culprit_info) |
| 194 if not task: | 278 |
| 195 continue | 279 |
| 196 classified_tests = task.classified_tests | 280 def _UpdateTryJobInfoBasedOnSwarming(step_tasks_info, try_jobs): |
| 197 step_no_platform = task.parameters.get( | 281 """ |
| 198 'ref_name', step_name.split()[0]) | 282 Args: |
| 199 for culprit_info in culprits_info.values(): | 283 step_tasks_info (dict): A dict of swarming task info for this step. |
| 200 if (culprit_info['try_job_key'] == failure_key and | 284 It is the result from _GenerateSwarmingTasksData. |
| 201 step_name == culprit_info['step_name']): | 285 try_jobs (list): A list to save try job data for the step, format as below: |
| 202 culprit_info['step_no_platform'] = step_no_platform | 286 [ |
| 203 if culprit_info['test_name'] in classified_tests.get('flaky_tests', []): | 287 { |
| 204 culprit_info['status'] = FLAKY | 288 'try_job_key': 'm/b/120' |
| 289 }, | |
| 290 { | |
| 291 'try_job_key': 'm/b/121' | |
| 292 }, | |
| 293 ... | |
| 294 ] | |
| 295 """ | |
| 296 additional_flakiness_list = [] | |
| 297 for try_job in try_jobs: | |
| 298 try_job_key = try_job['try_job_key'] | |
| 299 task = step_tasks_info.get('swarming_tasks', {}).get(try_job_key) | |
| 300 | |
| 301 if task['task_info']['status'] != wf_analysis_status.ANALYZED: | |
| 302 # There is something wrong with swarming task or it's not done yet, | |
| 303 # no try job yet or ever. | |
| 304 try_job['status'] = result_status.NO_TRY_JOB_REASON_MAP[ | |
| 305 task['task_info']['status']] | |
| 306 try_job['tests'] = task.get('all_tests', []) | |
| 307 else: | |
| 308 # Swarming task is completed, group tests according to task result. | |
| 309 try_job['ref_name'] = task['ref_name'] | |
| 310 if task.get('reliable_tests'): | |
| 311 try_job['tests'] = task['reliable_tests'] | |
| 312 if task.get('flaky_tests'): | |
| 313 # Split this try job into two groups: flaky group and reliable group. | |
| 314 flaky_try_job = copy.deepcopy(try_job) | |
| 315 flaky_try_job['status'] = result_status.FLAKY | |
| 316 flaky_try_job['tests'] = task['flaky_tests'] | |
| 317 flaky_try_job['task_id'] = task['task_info']['task_id'] | |
| 318 flaky_try_job['task_url'] = task['task_info']['task_url'] | |
| 319 additional_flakiness_list.append(flaky_try_job) | |
| 320 elif task.get('flaky_tests'): # pragma: no cover | |
| 321 # All Flaky. | |
| 322 try_job['status'] = result_status.FLAKY | |
| 323 try_job['tests'] = task['flaky_tests'] | |
| 324 | |
| 325 if task['task_info'].get('task_id'): | |
| 326 try_job['task_id'] = task['task_info']['task_id'] | |
| 327 try_job['task_url'] = task['task_info']['task_url'] | |
| 328 | |
| 329 try_jobs.extend(additional_flakiness_list) | |
| 330 | |
| 331 | |
| 332 def _GetAllTryJobResultsForTest(failure_result_map, tasks_info): | |
| 333 culprits_info = defaultdict(lambda: defaultdict(list)) | |
| 334 if not tasks_info: | |
| 335 return culprits_info | |
| 336 | |
| 337 try_job_keys = set() | |
| 338 for step_name, step_failure_result_map in failure_result_map.iteritems(): | |
| 339 try_jobs = culprits_info[step_name]['try_jobs'] | |
| 340 | |
| 341 if isinstance(step_failure_result_map, dict): | |
| 342 step_try_job_keys = set() | |
| 343 for try_job_key in step_failure_result_map.values(): | |
| 344 if try_job_key not in step_try_job_keys: | |
| 345 try_job_dict = { | |
| 346 'try_job_key': try_job_key | |
| 347 } | |
| 348 try_jobs.append(try_job_dict) | |
| 349 step_try_job_keys.add(try_job_key) | |
| 350 try_job_keys.update(step_try_job_keys) | |
|
stgao
2016/03/29 06:22:44
Just double check: should this be outside the for loop?
chanli
2016/03/29 06:27:59
Yes, logically it should be outside of this for loop.
stgao
2016/04/07 00:30:04
Fixed in https://codereview.chromium.org/186688300
| |
| 351 else: | |
| 352 # Try job should only be triggered for swarming tests, because we cannot | |
| 353 # identify flaky tests for non-swarming tests. | |
| 354 try_job_dict = { | |
| 355 'try_job_key': step_failure_result_map | |
| 356 } | |
| 357 try_jobs.append(try_job_dict) | |
| 358 | |
| 359 _UpdateTryJobInfoBasedOnSwarming(tasks_info[step_name], try_jobs) | |
| 360 | |
| 361 for try_job_key in try_job_keys: | |
| 362 _GetCulpritInfoForTryJobResultForTest(try_job_key, culprits_info) | |
| 363 | |
| 364 return culprits_info | |
| 365 | |
| 366 | |
| 367 def _GetTryJobResultForCompile(failure_result_map): | |
| 368 try_job_key = failure_result_map['compile'] | |
| 369 referred_build_keys = try_job_key.split('/') | |
| 370 culprit_info = defaultdict(lambda: defaultdict(list)) | |
| 371 | |
| 372 try_job = WfTryJob.Get(*referred_build_keys) | |
| 373 if not try_job or try_job.test_results: | |
| 374 return culprit_info | |
| 375 | |
| 376 try_job_result = ( | |
| 377 try_job.compile_results[-1] if try_job.compile_results else None) | |
| 378 | |
| 379 compile_try_job = { | |
| 380 'try_job_key': try_job_key, | |
| 381 'status': try_job.status | |
| 382 } | |
| 383 | |
| 384 if try_job_result: | |
| 385 if try_job_result.get('url'): | |
| 386 compile_try_job['try_job_url'] = try_job_result['url'] | |
| 387 compile_try_job['try_job_build_number'] = ( | |
| 388 _GetTryJobBuildNumber(try_job_result['url'])) | |
| 389 if try_job_result.get('culprit', {}).get('compile'): | |
| 390 compile_try_job['culprit'] = try_job_result['culprit']['compile'] | |
| 391 | |
| 392 culprit_info['compile']['try_jobs'].append(compile_try_job) | |
| 393 return culprit_info | |
| 205 | 394 |
| 206 | 395 |
| 207 def GetAllTryJobResults(master_name, builder_name, build_number): | 396 def GetAllTryJobResults(master_name, builder_name, build_number): |
| 208 culprits_info = {} | 397 culprits_info = {} |
| 209 try_job_keys = set() | 398 is_test_failure = True |
| 210 | 399 |
| 211 analysis = WfAnalysis.Get(master_name, builder_name, build_number) | 400 analysis_result, failure_result_map = _GetFailureResultMap( |
| 212 if not analysis: | 401 master_name, builder_name, build_number) |
| 213 return culprits_info | 402 |
| 214 | |
| 215 failure_result_map = analysis.failure_result_map | |
| 216 if failure_result_map: | 403 if failure_result_map: |
| 217 # failure_result_map uses step_names as keys and saves referred try_job_keys | 404 for step_name in failure_result_map: |
| 218 # If non-swarming, step_name and referred_try_job_key match directly as: | 405 if step_name.lower() == 'compile': |
| 219 # step_name: try_job_key | 406 is_test_failure = False |
| 220 # If swarming, add one more layer of tests, so the format would be: | 407 break |
| 221 # step_name: { | 408 if is_test_failure: |
| 222 # test_name1: try_job_key1, | 409 tasks_info = _GenerateSwarmingTasksData(failure_result_map) |
| 223 # test_name2: try_job_key2, | 410 culprits_info = _GetAllTryJobResultsForTest( |
| 224 # ... | 411 failure_result_map, tasks_info) |
| 225 # } | 412 else: |
| 226 for step_name, step_failure_result_map in failure_result_map.iteritems(): | 413 culprits_info = _GetTryJobResultForCompile(failure_result_map) |
| 227 if isinstance(step_failure_result_map, dict): | 414 elif analysis_result: |
| 228 step_refering_keys = set() | 415 for failure in analysis_result['failures']: |
| 229 for failed_test, try_job_key in step_failure_result_map.iteritems(): | 416 step_name = failure['step_name'] |
| 230 step_test_key = '%s-%s' % (step_name, failed_test) | 417 tests = [] |
| 231 culprits_info[step_test_key] = { | 418 for test in failure.get('tests', []): |
| 232 'step_name': step_name, | 419 tests.append(test['test_name']) |
| 233 'test_name': failed_test, | 420 |
| 234 'try_job_key': try_job_key | 421 culprits_info[step_name] = { |
| 235 } | 422 'try_jobs': [ |
| 236 step_refering_keys.add(try_job_key) | 423 { |
| 237 | 424 'status': result_status.NO_FAILURE_RESULT_MAP, |
| 238 _UpdateTryJobCulpritUsingSwarmingTask( | 425 'tests': tests |
| 239 step_name, step_refering_keys, culprits_info) | 426 } |
| 240 try_job_keys.update(step_refering_keys) | 427 ] |
| 241 else: | 428 } |
| 242 culprits_info[step_name] = { | |
| 243 'step_name': step_name, | |
| 244 'test_name': 'N/A', | |
| 245 'try_job_key': step_failure_result_map | |
| 246 } | |
| 247 try_job_keys.add(step_failure_result_map) | |
| 248 | |
| 249 for try_job_key in try_job_keys: | |
| 250 _GetCulpritInfoForTryJobResult(try_job_key, culprits_info) | |
| 251 | |
| 252 return culprits_info | 429 return culprits_info |
| OLD | NEW |