Chromium Code Reviews| OLD | NEW |
|---|---|
| 1 # Copyright 2016 The Chromium Authors. All rights reserved. | 1 # Copyright 2016 The Chromium Authors. All rights reserved. |
| 2 # Use of this source code is governed by a BSD-style license that can be | 2 # Use of this source code is governed by a BSD-style license that can be |
| 3 # found in the LICENSE file. | 3 # found in the LICENSE file. |
| 4 | 4 |
| 5 from collections import defaultdict | 5 from collections import defaultdict |
| 6 import copy | 6 import copy |
| 7 | 7 |
| 8 from handlers import result_status | |
| 8 from model import wf_analysis_status | 9 from model import wf_analysis_status |
| 9 from model.wf_analysis import WfAnalysis | 10 from model.wf_analysis import WfAnalysis |
| 10 from model.wf_swarming_task import WfSwarmingTask | 11 from model.wf_swarming_task import WfSwarmingTask |
| 11 from model.wf_try_job import WfTryJob | 12 from model.wf_try_job import WfTryJob |
| 12 from waterfall import buildbot | 13 from waterfall import buildbot |
| 13 from waterfall import waterfall_config | 14 from waterfall import waterfall_config |
| 14 | 15 |
| 15 | 16 |
| 16 FLAKY = 'Flaky' | 17 def _GetFailureResultMap(master_name, builder_name, build_number): |
| 18 analysis = WfAnalysis.Get(master_name, builder_name, build_number) | |
| 19 if not analysis: | |
| 20 return None, None | |
| 21 | |
| 22 return analysis.result, analysis.failure_result_map | |
| 17 | 23 |
| 18 | 24 |
| 19 def GenerateSwarmingTasksData(master_name, builder_name, build_number): | 25 def _GetAllTestsForASwarmingTask(task_key, step_failure_result_map): |
| 26 all_tests = set() | |
| 27 for test_name, test_task_key in step_failure_result_map.iteritems(): | |
| 28 if task_key == test_task_key: | |
| 29 all_tests.add(test_name) | |
| 30 return list(all_tests) | |
| 31 | |
| 32 | |
| 33 def _GenerateSwarmingTasksData(failure_result_map): | |
| 20 """Collects info for all related swarming tasks. | 34 """Collects info for all related swarming tasks. |
| 21 | 35 |
| 22 Returns: A dict as below: | 36 Returns: A dict as below: |
| 23 { | 37 { |
| 24 'step1': { | 38 'step1': { |
| 25 'swarming_tasks': [ | 39 'swarming_tasks': { |
| 26 { | 40 'm/b/121': { |
| 27 'status': 'Completed', | 41 'task_info': { |
| 28 'task_id': 'task1', | 42 'status': 'Completed', |
| 29 'task_url': ( | 43 'task_id': 'task1', |
| 30 'https://chromium-swarm.appspot.com/user/task/task1'), | 44 'task_url': ('https://chromium-swarm.appspot.com/user' |
| 31 'tests': ['test2'] | 45 '/task/task1') |
| 32 }, | 46 }, |
| 33 { | 47 'all_tests': ['test2', 'test3', 'test4'], |
| 34 'status': 'Completed', | 48 'reliable_tests': ['test2'], |
| 35 'task_id': 'task0', | 49 'flaky_tests': ['test3', 'test4'] |
| 36 'task_url': ( | |
| 37 'https://chromium-swarm.appspot.com/user/task/task0'), | |
| 38 'tests': ['test1'] | |
| 39 } | |
| 40 ], | |
| 41 'tests': { | |
| 42 'test1': { | |
| 43 'status': 'Completed', | |
| 44 'task_id': 'task0', | |
| 45 'task_url': ( | |
| 46 'https://chromium-swarm.appspot.com/user/task/task0') | |
| 47 }, | |
| 48 'test2': { | |
| 49 'status': 'Completed', | |
| 50 'task_id': 'task1', | |
| 51 'task_url': ( | |
| 52 'https://chromium-swarm.appspot.com/user/task/task1') | |
| 53 } | 50 } |
| 54 } | 51 } |
| 55 }, | 52 }, |
| 56 'step2': { | 53 'step2': { |
| 57 'swarming_tasks': [ | 54 'swarming_tasks': { |
| 58 { | 55 'm/b/121': { |
| 59 'status': 'Pending' | 56 'task_info': { |
| 57 'status': 'Pending' | |
| 58 }, | |
| 59 'all_tests': ['test1'] | |
| 60 } | 60 } |
| 61 ], | 61 } |
| 62 'tests': { | 62 }, |
| 63 'test1': { | 63 'step3': { |
| 64 'status': 'Pending' | 64 'swarming_tasks': { |
| 65 'm/b/121': { | |
| 66 'task_info': { | |
| 67 'status': 'No swarming rerun found' | |
| 68 }, | |
| 69 'all_tests': ['test1'] | |
| 65 } | 70 } |
| 66 } | 71 } |
| 67 } | 72 } |
| 68 } | 73 } |
| 69 """ | 74 """ |
| 70 tasks_info = defaultdict(dict) | |
| 71 | 75 |
| 72 analysis = WfAnalysis.Get(master_name, builder_name, build_number) | 76 tasks_info = defaultdict(lambda: defaultdict(lambda: defaultdict(dict))) |
| 73 if not analysis: | |
| 74 return tasks_info | |
| 75 | 77 |
| 76 failure_result_map = analysis.failure_result_map | 78 swarming_server = waterfall_config.GetSwarmingSettings()['server_host'] |
| 77 if failure_result_map: | 79 for step_name, failure in failure_result_map.iteritems(): |
| 78 for step_name, failure in failure_result_map.iteritems(): | 80 step_tasks_info = tasks_info[step_name]['swarming_tasks'] |
| 79 if isinstance(failure, dict): | 81 if isinstance(failure, dict): |
| 80 # Only trigger swarming task for swarming test failures. | 82 # Only swarming test failures have swarming re-runs. |
| 81 key_test_map = defaultdict(list) | 83 swarming_task_keys = set(failure.values()) |
| 82 for test_name, first_failure_key in failure.iteritems(): | |
| 83 key_test_map[first_failure_key].append(test_name) | |
| 84 | 84 |
| 85 tasks_info[step_name]['swarming_tasks'] = [] | 85 for key in swarming_task_keys: |
| 86 tasks_info[step_name]['tests'] = defaultdict(dict) | 86 task_dict = step_tasks_info[key] |
| 87 step_tasks_info = tasks_info[step_name]['swarming_tasks'] | 87 referred_build_keys = key.split('/') |
| 88 tests = tasks_info[step_name]['tests'] | 88 task = WfSwarmingTask.Get(*referred_build_keys, step_name=step_name) |
| 89 for key, test_names in key_test_map.iteritems(): | 89 if not task: # In case task got manually removed from data store. |
| 90 referred_build_keys = key.split('/') | |
| 91 task = WfSwarmingTask.Get(*referred_build_keys, step_name=step_name) | |
| 92 if not task: | |
| 93 continue | |
| 94 task_info = { | 90 task_info = { |
| 95 'status': wf_analysis_status.SWARMING_STATUS_TO_DESCRIPTION.get( | 91 'status': result_status.NO_SWARMING_TASK_FOUND |
| 96 task.status) | |
| 97 } | 92 } |
| 98 if task.task_id: | 93 task_dict['all_tests'] = _GetAllTestsForASwarmingTask(key, failure) |
| 94 else: | |
| 95 task_info = { | |
| 96 'status': task.status | |
| 97 } | |
| 98 | |
| 99 task_dict['all_tests'] = ( | |
| 100 _GetAllTestsForASwarmingTask(key, failure) | |
| 101 if not (task.parameters and task.parameters.get('tests')) | |
| 102 else task.parameters['tests']) | |
| 103 | |
| 104 # Get the step name without platform. | |
| 105 # This value should have been saved in task.parameters; | |
| 106 # in case of no such value saved, split the step_name. | |
| 107 task_dict['ref_name'] = ( | |
| 108 step_name.split()[0] | |
| 109 if not task.parameters or not task.parameters.get('ref_name') | |
| 110 else task.parameters['ref_name']) | |
| 111 | |
| 112 if task.task_id: # Swarming rerun has started. | |
|
lijeffrey
2016/03/28 23:35:04
nit: 2 spaces before #
chanli
2016/03/29 01:35:47
Done.
| |
| 99 task_info['task_id'] = task.task_id | 113 task_info['task_id'] = task.task_id |
| 100 task_info['task_url'] = 'https://%s/user/task/%s' % ( | 114 task_info['task_url'] = 'https://%s/user/task/%s' % ( |
| 101 waterfall_config.GetSwarmingSettings()['server_host'], | 115 swarming_server, task.task_id) |
| 102 task.task_id) | 116 if task.classified_tests: # Swarming rerun has result. |
| 117 task_dict['reliable_tests'] = task.classified_tests.get( | |
| 118 'reliable_tests', []) | |
| 119 task_dict['flaky_tests'] = task.classified_tests.get( | |
| 120 'flaky_tests', []) | |
| 103 | 121 |
| 104 for test_name in test_names: | 122 task_dict['task_info'] = task_info |
| 105 tests[test_name] = copy.deepcopy(task_info) | 123 else: |
| 106 | 124 step_tasks_info[failure] = { |
| 107 task_info['tests'] = test_names | 125 'task_info': { |
| 108 step_tasks_info.append(task_info) | 126 'status': result_status.NON_SWARMING_NO_RERUN |
| 127 } | |
| 128 } | |
| 109 | 129 |
| 110 return tasks_info | 130 return tasks_info |
| 111 | 131 |
| 112 | 132 |
| 133 def GetSwarmingTaskInfo(master_name, builder_name, build_number): | |
| 134 _, failure_result_map = _GetFailureResultMap( | |
| 135 master_name, builder_name, build_number) | |
| 136 return ( | |
| 137 _GenerateSwarmingTasksData(failure_result_map) | |
| 138 if failure_result_map else {}) | |
| 139 | |
| 140 | |
| 113 def _GetTryJobBuildNumber(url): | 141 def _GetTryJobBuildNumber(url): |
| 114 build_keys = buildbot.ParseBuildUrl(url) | 142 build_keys = buildbot.ParseBuildUrl(url) |
| 115 return build_keys[2] | 143 return build_keys[2] |
| 116 | 144 |
| 117 | 145 |
| 118 def _GetCulpritInfoForTryJobResult(try_job_key, culprits_info): | 146 def _OrganizeTryJobResultByCulprits(try_job_culprits): |
| 147 """Re-organize try job culprits by revision. | |
| 148 | |
| 149 Args: | |
| 150 try_job_culprits (dict): A dict of culprits for one step organized by test: | |
| 151 { | |
| 152 'tests': { | |
| 153 'a_test1': { | |
| 154 'revision': 'rev1', | |
| 155 'commit_position': '1', | |
| 156 'review_url': 'url_1' | |
| 157 }, | |
| 158 'a_test2': { | |
| 159 'revision': 'rev1', | |
| 160 'commit_position': '1', | |
| 161 'review_url': 'url_1' | |
| 162 } | |
| 163 } | |
| 164 } | |
| 165 Returns: | |
| 166 A dict of culprits for one step organized by revision: | |
| 167 { | |
| 168 'rev1': { | |
| 169 'revision': 'rev1', | |
| 170 'commit_position': '1', | |
| 171 'review_url': 'url_1', | |
| 172 'tests': ['a_test1', 'a_test2'] | |
| 173 } | |
| 174 } | |
| 175 """ | |
| 176 if not try_job_culprits or not try_job_culprits.get('tests'): | |
| 177 return {} | |
| 178 | |
| 179 organized_culprits = {} | |
| 180 for test_name, culprit in try_job_culprits['tests'].iteritems(): | |
| 181 revision = culprit['revision'] | |
| 182 if organized_culprits.get(revision): | |
| 183 organized_culprits[revision]['failed_tests'].append(test_name) | |
| 184 else: | |
| 185 organized_culprits[revision] = culprit | |
| 186 organized_culprits[revision]['failed_tests'] = [test_name] | |
| 187 | |
| 188 return organized_culprits | |
| 189 | |
| 190 | |
| 191 def _GetCulpritInfoForTryJobResultForTest(try_job_key, culprits_info): | |
| 119 referred_build_keys = try_job_key.split('/') | 192 referred_build_keys = try_job_key.split('/') |
| 120 try_job = WfTryJob.Get(*referred_build_keys) | 193 try_job = WfTryJob.Get(*referred_build_keys) |
| 121 if not try_job: | 194 if not try_job or try_job.compile_results: |
| 122 return | 195 return |
| 123 | 196 |
| 124 if try_job.compile_results: | 197 try_job_result = try_job.test_results[-1] if try_job.test_results else None |
| 125 try_job_result = try_job.compile_results[-1] | 198 |
| 126 elif try_job.test_results: | 199 for step_try_jobs in culprits_info.values(): |
| 127 try_job_result = try_job.test_results[-1] | 200 # If try job found different culprits for each test, split tests by culprit. |
| 128 else: | 201 additional_tests_culprit_info = [] |
| 129 try_job_result = None | 202 for try_job_info in step_try_jobs['try_jobs']: |
| 130 | 203 if try_job_key != try_job_info['try_job_key']: |
| 131 additional_tests_culprit_info = {} | 204 continue |
| 132 for culprit_info in culprits_info.values(): | 205 |
| 133 if culprit_info['try_job_key'] != try_job_key: | 206 if try_job_info.get('status'): |
|
lijeffrey
2016/03/28 23:35:04
this if can be combined into 1 if
i.e.
if (try_j
chanli
2016/03/29 01:35:47
Done.
| |
| 134 continue | 207 # The try job has been updated by swarming task: |
| 135 | 208 # If there is no swarming task, there won't be try job as well; |
| 136 # Only include try job result for reliable tests. | 209 # If the swarming task is not completed yet, there won't be try job yet; |
| 137 # Flaky tests have been marked as 'Flaky'. | 210 # If there are flaky tests found, those tests will be marked as flaky. |
| 138 culprit_info['status'] = ( | 211 continue |
| 139 wf_analysis_status.TRY_JOB_STATUS_TO_DESCRIPTION[try_job.status] | 212 |
| 140 if not culprit_info.get('status') else culprit_info['status']) | 213 try_job_info['status'] = try_job.status |
| 141 | 214 if try_job_result: |
| 142 if try_job_result and culprit_info['status'] != FLAKY: | 215 # Needs to use ref_name to match step_name in try job. |
| 143 if try_job_result.get('url'): | 216 ref_name = try_job_info['ref_name'] |
| 144 culprit_info['try_job_url'] = try_job_result['url'] | 217 # Saves try job information. |
| 145 culprit_info['try_job_build_number'] = ( | 218 if try_job_result.get('url'): |
| 146 _GetTryJobBuildNumber(try_job_result['url'])) | 219 try_job_info['try_job_url'] = try_job_result['url'] |
| 147 if try_job_result.get('culprit'): | 220 try_job_info['try_job_build_number'] = ( |
| 148 try_job_culprits = try_job_result['culprit'] | 221 _GetTryJobBuildNumber(try_job_result['url'])) |
| 149 step = culprit_info.get('step_no_platform', culprit_info['step_name']) | 222 |
| 150 test = culprit_info['test_name'] | 223 if (try_job_result.get('culprit') and |
| 151 | 224 try_job_result['culprit'].get(ref_name)): |
| 152 if test == 'N/A': # Only step level. | 225 # Saves try job culprits information. |
| 153 if try_job_culprits.get(step, {}).get('tests'): | 226 |
| 154 # try job results has specified tests. | 227 # Uses culprits to group tests. |
| 155 step_culprits = try_job_culprits[step]['tests'] | 228 culprit_tests_map = _OrganizeTryJobResultByCulprits( |
| 156 for test_name, try_job_culprit in step_culprits.iteritems(): | 229 try_job_result['culprit'][ref_name]) |
| 157 additional_test_key = '%s-%s' % (step, test_name) | 230 unrgouped_tests = try_job_info['tests'] |
| 158 additional_tests_culprit_info[additional_test_key] = { | 231 list_of_culprits = [] |
| 159 'step_name': step, | 232 for culprit_info in culprit_tests_map.values(): |
| 160 'test_name': test_name, | 233 failed_tests = culprit_info['failed_tests'] |
| 161 'try_job_key': try_job_key, | 234 list_of_culprits.append(culprit_info) |
| 162 'status': culprit_info['status'], | 235 # Gets tests that haven't been grouped. |
| 163 'try_job_url': culprit_info['try_job_url'], | 236 unrgouped_tests = list( |
| 164 'try_job_build_number': culprit_info['try_job_build_number'], | 237 set(unrgouped_tests) ^ set(failed_tests)) |
| 165 'revision': try_job_culprit.get('revision'), | 238 if not unrgouped_tests: |
| 166 'commit_position': try_job_culprit.get('commit_position'), | 239 # All tests have been grouped. |
| 167 'review_url': try_job_culprit.get('review_url') | 240 break |
| 168 } | 241 |
| 169 continue | 242 index_start = 1 |
| 243 if unrgouped_tests: | |
| 244 # There are tests that don't have try job culprits. | |
| 245 # Group these tests together. | |
| 246 # Save them in current try_job_info. | |
| 247 try_job_info['tests'] = unrgouped_tests | |
| 248 try_job_info['culprit'] = {} | |
| 249 # Saves all the tests that have culprits later. | |
| 250 index_start = 0 | |
| 170 else: | 251 else: |
| 171 # For historical culprit found by try job for compile, | 252 # Saves the first culprit in current try_job_info. |
| 172 # step name is not recorded. | 253 # Saves all the other culprits later. |
| 173 culprit = try_job_culprits.get(step) or try_job_culprits | 254 try_job_info['culprit'] = { |
| 174 elif test in try_job_culprits.get(step, {}).get('tests'): | 255 'revision': list_of_culprits[0]['revision'], |
| 175 culprit = try_job_culprits[step]['tests'][test] | 256 'commit_position': list_of_culprits[0]['commit_position'], |
| 176 else: # pragma: no cover | 257 'review_url': list_of_culprits[0]['review_url'] |
| 177 continue # No culprit for test found. | 258 } |
| 178 | 259 try_job_info['tests'] = list_of_culprits[0]['failed_tests'] |
| 179 culprit_info['revision'] = culprit.get('revision') | 260 |
| 180 culprit_info['commit_position'] = culprit.get('commit_position') | 261 for n in xrange(index_start, len(list_of_culprits)): |
| 181 culprit_info['review_url'] = culprit.get('review_url') | 262 # Appends the rest of test groups to step_try_jobs['try_jobs']. |
| 182 | 263 iterate_culprit = list_of_culprits[n] |
| 183 if additional_tests_culprit_info: | 264 tmp_try_job_info = copy.deepcopy(try_job_info) |
| 184 for key, test_culprit_info in additional_tests_culprit_info.iteritems(): | 265 tmp_try_job_info['culprit'] = { |
| 185 culprits_info.pop(test_culprit_info['step_name'], None) | 266 'revision': iterate_culprit['revision'], |
| 186 culprits_info[key] = test_culprit_info | 267 'commit_position': iterate_culprit['commit_position'], |
| 187 | 268 'review_url': iterate_culprit['review_url'] |
| 188 | 269 } |
| 189 def _UpdateTryJobCulpritUsingSwarmingTask( | 270 tmp_try_job_info['tests'] = iterate_culprit['failed_tests'] |
| 190 step_name, failure_key_set, culprits_info): | 271 additional_tests_culprit_info.append(tmp_try_job_info) |
| 191 for failure_key in failure_key_set: | 272 |
| 192 build_keys = failure_key.split('/') | 273 if additional_tests_culprit_info: |
| 193 task = WfSwarmingTask.Get(*build_keys, step_name=step_name) | 274 step_try_jobs['try_jobs'].extend(additional_tests_culprit_info) |
| 194 if not task: | 275 |
| 195 continue | 276 |
| 196 classified_tests = task.classified_tests | 277 def _UpdateTryJobInfoBasedOnSwarming(step_tasks_info, try_jobs): |
| 197 step_no_platform = task.parameters.get( | 278 """ |
| 198 'ref_name', step_name.split()[0]) | 279 Args: |
| 199 for culprit_info in culprits_info.values(): | 280 step_tasks_info (dict): A dict of swarming task info for this step. |
| 200 if (culprit_info['try_job_key'] == failure_key and | 281 It is the result from _GenerateSwarmingTasksData. |
| 201 step_name == culprit_info['step_name']): | 282 try_jobs (list): A list to save try job data for the step, format as below: |
| 202 culprit_info['step_no_platform'] = step_no_platform | 283 [ |
| 203 if culprit_info['test_name'] in classified_tests.get('flaky_tests', []): | 284 { |
| 204 culprit_info['status'] = FLAKY | 285 'try_job_key': 'm/b/120' |
| 286 }, | |
| 287 { | |
| 288 'try_job_key': 'm/b/121' | |
| 289 }, | |
| 290 ... | |
| 291 ] | |
| 292 """ | |
| 293 additional_flakiness_list = [] | |
| 294 for try_job in try_jobs: | |
| 295 try_job_key = try_job['try_job_key'] | |
| 296 task = step_tasks_info.get('swarming_tasks', {}).get(try_job_key) | |
| 297 | |
| 298 if task['task_info']['status'] != wf_analysis_status.ANALYZED: | |
| 299 # There is something wrong with swarming task or it's not done yet, | |
| 300 # no try job yet or ever. | |
| 301 try_job['status'] = result_status.NO_TRY_JOB_REASON_MAP[ | |
| 302 task['task_info']['status']] | |
| 303 try_job['tests'] = task.get('all_tests', []) | |
| 304 else: | |
| 305 # Swarming task is completed, group tests according to task result. | |
| 306 try_job['ref_name'] = task['ref_name'] | |
| 307 if task.get('reliable_tests'): | |
| 308 try_job['tests'] = task['reliable_tests'] | |
| 309 if task.get('flaky_tests'): | |
| 310 # Split this try job into two groups: flaky group and reliable group. | |
| 311 flaky_try_job = copy.deepcopy(try_job) | |
| 312 flaky_try_job['status'] = result_status.FLAKY | |
| 313 flaky_try_job['tests'] = task['flaky_tests'] | |
| 314 flaky_try_job['task_id'] = task['task_info']['task_id'] | |
| 315 flaky_try_job['task_url'] = task['task_info']['task_url'] | |
| 316 additional_flakiness_list.append(flaky_try_job) | |
| 317 elif task.get('flaky_tests'): # pragma: no cover | |
| 318 # All Flaky. | |
| 319 try_job['status'] = result_status.FLAKY | |
| 320 try_job['tests'] = task['flaky_tests'] | |
| 321 | |
| 322 if task['task_info'].get('task_id'): | |
| 323 try_job['task_id'] = task['task_info']['task_id'] | |
| 324 try_job['task_url'] = task['task_info']['task_url'] | |
| 325 | |
| 326 if additional_flakiness_list: | |
| 327 try_jobs.extend(additional_flakiness_list) | |
|
lijeffrey
2016/03/28 23:35:04
nit: even if additional_flakiness_list is empty an
chanli
2016/03/29 01:35:47
Done.
| |
| 328 | |
| 329 | |
| 330 def _GetAllTryJobResultsForTest(failure_result_map, tasks_info): | |
| 331 culprits_info = defaultdict(lambda: defaultdict(list)) | |
| 332 if not tasks_info: | |
| 333 return culprits_info | |
| 334 | |
| 335 try_job_keys = set() | |
| 336 for step_name, step_failure_result_map in failure_result_map.iteritems(): | |
| 337 try_jobs = culprits_info[step_name]['try_jobs'] | |
| 338 | |
| 339 if isinstance(step_failure_result_map, dict): | |
| 340 for try_job_key in step_failure_result_map.values(): | |
| 341 if try_job_key not in try_job_keys: | |
| 342 try_job_dict = { | |
| 343 'try_job_key': try_job_key | |
| 344 } | |
| 345 try_jobs.append(try_job_dict) | |
| 346 try_job_keys.add(try_job_key) | |
| 347 else: | |
| 348 # Try job should only be triggered for swarming tests, because we cannot | |
| 349 # identify flaky tests for non-swarming tests. | |
| 350 try_job_dict = { | |
| 351 'try_job_key': step_failure_result_map | |
| 352 } | |
| 353 try_jobs.append(try_job_dict) | |
| 354 | |
| 355 _UpdateTryJobInfoBasedOnSwarming(tasks_info[step_name], try_jobs) | |
| 356 | |
| 357 for try_job_key in try_job_keys: | |
| 358 _GetCulpritInfoForTryJobResultForTest(try_job_key, culprits_info) | |
| 359 | |
| 360 return culprits_info | |
| 361 | |
| 362 | |
| 363 def _GetTryJobResultForCompile(failure_result_map): | |
| 364 try_job_key = failure_result_map['compile'] | |
| 365 referred_build_keys = try_job_key.split('/') | |
| 366 culprit_info = defaultdict(lambda: defaultdict(list)) | |
| 367 | |
| 368 try_job = WfTryJob.Get(*referred_build_keys) | |
| 369 if not try_job or try_job.test_results: | |
| 370 return culprit_info | |
| 371 | |
| 372 try_job_result = ( | |
| 373 try_job.compile_results[-1] if try_job.compile_results else None) | |
| 374 | |
| 375 compile_try_job = { | |
| 376 'try_job_key': try_job_key, | |
| 377 'status': try_job.status | |
| 378 } | |
| 379 | |
| 380 if try_job_result: | |
| 381 if try_job_result.get('url'): | |
| 382 compile_try_job['try_job_url'] = try_job_result['url'] | |
| 383 compile_try_job['try_job_build_number'] = ( | |
| 384 _GetTryJobBuildNumber(try_job_result['url'])) | |
| 385 if try_job_result.get('culprit', {}).get('compile'): | |
| 386 compile_try_job['culprit'] = try_job_result['culprit']['compile'] | |
| 387 | |
| 388 culprit_info['compile']['try_jobs'].append(compile_try_job) | |
| 389 return culprit_info | |
| 205 | 390 |
| 206 | 391 |
| 207 def GetAllTryJobResults(master_name, builder_name, build_number): | 392 def GetAllTryJobResults(master_name, builder_name, build_number): |
| 208 culprits_info = {} | 393 culprits_info = {} |
| 209 try_job_keys = set() | 394 is_test_failure = True |
| 210 | 395 |
| 211 analysis = WfAnalysis.Get(master_name, builder_name, build_number) | 396 analysis_result, failure_result_map = _GetFailureResultMap( |
| 212 if not analysis: | 397 master_name, builder_name, build_number) |
| 213 return culprits_info | 398 |
| 214 | |
| 215 failure_result_map = analysis.failure_result_map | |
| 216 if failure_result_map: | 399 if failure_result_map: |
| 217 # failure_result_map uses step_names as keys and saves referred try_job_keys | 400 for step_name in failure_result_map: |
|
lijeffrey
2016/03/28 23:35:04
I would separate this out into a different functio
chanli
2016/03/29 01:35:47
Since this logic is pretty straight forward, I wil
| |
| 218 # If non-swarming, step_name and referred_try_job_key match directly as: | 401 if step_name == 'compile': |
|
lijeffrey
2016/03/28 23:35:04
nit: just to be safe use step_name.lower()
chanli
2016/03/29 01:35:47
Done.
| |
| 219 # step_name: try_job_key | 402 is_test_failure = False |
| 220 # If swarming, add one more layer of tests, so the format would be: | 403 break |
| 221 # step_name: { | 404 if is_test_failure: |
| 222 # test_name1: try_job_key1, | 405 tasks_info = _GenerateSwarmingTasksData(failure_result_map) |
| 223 # test_name2: try_job_key2, | 406 culprits_info = _GetAllTryJobResultsForTest( |
| 224 # ... | 407 failure_result_map, tasks_info) |
| 225 # } | 408 else: |
| 226 for step_name, step_failure_result_map in failure_result_map.iteritems(): | 409 culprits_info = _GetTryJobResultForCompile(failure_result_map) |
| 227 if isinstance(step_failure_result_map, dict): | 410 elif analysis_result: |
| 228 step_refering_keys = set() | 411 for failure in analysis_result['failures']: |
| 229 for failed_test, try_job_key in step_failure_result_map.iteritems(): | 412 step_name = failure['step_name'] |
| 230 step_test_key = '%s-%s' % (step_name, failed_test) | 413 tests = [] |
| 231 culprits_info[step_test_key] = { | 414 for test in failure['tests']: |
| 232 'step_name': step_name, | 415 tests.append(test['test_name']) |
| 233 'test_name': failed_test, | 416 |
| 234 'try_job_key': try_job_key | 417 culprits_info[step_name] = { |
| 235 } | 418 'try_jobs': [ |
| 236 step_refering_keys.add(try_job_key) | 419 { |
| 237 | 420 'status': result_status.NO_FAILURE_RESULT_MAP, |
| 238 _UpdateTryJobCulpritUsingSwarmingTask( | 421 'tests': tests |
| 239 step_name, step_refering_keys, culprits_info) | 422 } |
| 240 try_job_keys.update(step_refering_keys) | 423 ] |
| 241 else: | 424 } |
| 242 culprits_info[step_name] = { | |
| 243 'step_name': step_name, | |
| 244 'test_name': 'N/A', | |
| 245 'try_job_key': step_failure_result_map | |
| 246 } | |
| 247 try_job_keys.add(step_failure_result_map) | |
| 248 | |
| 249 for try_job_key in try_job_keys: | |
| 250 _GetCulpritInfoForTryJobResult(try_job_key, culprits_info) | |
| 251 | |
| 252 return culprits_info | 425 return culprits_info |
| OLD | NEW |