Chromium Code Reviews| OLD | NEW |
|---|---|
| 1 # Copyright 2014 The Chromium Authors. All rights reserved. | 1 # Copyright 2014 The Chromium Authors. All rights reserved. |
| 2 # Use of this source code is governed by a BSD-style license that can be | 2 # Use of this source code is governed by a BSD-style license that can be |
| 3 # found in the LICENSE file. | 3 # found in the LICENSE file. |
| 4 | 4 |
| 5 import copy | |
| 6 from collections import defaultdict | |
|
stgao
2016/03/25 23:10:51
nit: order.
chanli
2016/03/25 23:44:07
Done.
| |
| 5 from datetime import datetime | 7 from datetime import datetime |
| 6 import os | 8 import os |
| 7 | 9 |
| 8 from google.appengine.api import users | 10 from google.appengine.api import users |
| 9 | 11 |
| 10 from base_handler import BaseHandler | 12 from base_handler import BaseHandler |
| 11 from base_handler import Permission | 13 from base_handler import Permission |
| 14 from handlers import handlers_util | |
| 15 from handlers.handlers_util import NO_TRY_JOB_REASON_MAP | |
| 16 from model import wf_analysis_status | |
| 12 from model.wf_analysis import WfAnalysis | 17 from model.wf_analysis import WfAnalysis |
| 13 from model.wf_analysis_result_status import RESULT_STATUS_TO_DESCRIPTION | 18 from model.wf_analysis_result_status import RESULT_STATUS_TO_DESCRIPTION |
| 14 from waterfall import build_failure_analysis_pipelines | 19 from waterfall import build_failure_analysis_pipelines |
| 15 from waterfall import buildbot | 20 from waterfall import buildbot |
| 16 from waterfall import waterfall_config | 21 from waterfall import waterfall_config |
| 17 | 22 |
| 18 | 23 |
| 19 BUILD_FAILURE_ANALYSIS_TASKQUEUE = 'build-failure-analysis-queue' | 24 BUILD_FAILURE_ANALYSIS_TASKQUEUE = 'build-failure-analysis-queue' |
| 20 | 25 |
| 21 | 26 |
| (...skipping 17 matching lines...) Expand all Loading... | |
| 39 datetime.utcfromtimestamp(triage_record['triage_timestamp'])), | 44 datetime.utcfromtimestamp(triage_record['triage_timestamp'])), |
| 40 'user_name': triage_record['user_name'], | 45 'user_name': triage_record['user_name'], |
| 41 'result_status': RESULT_STATUS_TO_DESCRIPTION.get( | 46 'result_status': RESULT_STATUS_TO_DESCRIPTION.get( |
| 42 triage_record['result_status']), | 47 triage_record['result_status']), |
| 43 'version': triage_record.get('version'), | 48 'version': triage_record.get('version'), |
| 44 }) | 49 }) |
| 45 | 50 |
| 46 return triage_history | 51 return triage_history |
| 47 | 52 |
| 48 | 53 |
| 54 def _GetOrganizedAnalysisResultBySuspectedCL(analysis_result): | |
| 55 """Group tests it they have the same suspected CLs.""" | |
| 56 | |
| 57 organized_results = defaultdict(list) | |
| 58 | |
| 59 if not analysis_result: | |
| 60 return organized_results | |
| 61 | |
| 62 for failure in analysis_result.get('failures', []): | |
| 63 step_name = failure['step_name'] | |
| 64 supported = failure.get('supported', True) | |
| 65 step_revisions_index = {} | |
| 66 organized_suspected_cls = organized_results[step_name] | |
| 67 | |
| 68 if not failure.get('tests'): | |
| 69 # Non swraming. | |
| 70 shared_result = { | |
| 71 'first_failure': failure['first_failure'], | |
| 72 'last_pass': failure.get('last_pass'), | |
| 73 'supported': supported, | |
| 74 'tests': [], | |
| 75 'suspected_cls': failure['suspected_cls'] | |
| 76 } | |
| 77 organized_suspected_cls.append(shared_result) | |
| 78 continue | |
| 79 | |
| 80 # Swarming tests. | |
| 81 for index, cl in enumerate(failure['suspected_cls']): | |
| 82 step_revisions_index[cl['revision']] = index | |
| 83 | |
| 84 # Groups tests by suspected CLs' revision. | |
| 85 # Format is as below: | |
| 86 # { | |
| 87 # 1: { | |
| 88 # 'tests': ['test1', 'test2'], | |
| 89 # 'revisions': ['rev1'], | |
| 90 # 'suspected_cls': [ | |
| 91 # # suspected cl info for rev1 at step level. | |
| 92 # ] | |
| 93 # }, | |
| 94 # 3: { | |
| 95 # 'tests': ['test3'], | |
| 96 # 'revisions': ['rev3', 'rev2'], | |
| 97 # 'suspected_cls': [ | |
| 98 # # suspected cl info for rev2, rev3 at step level. | |
| 99 # ] | |
| 100 # } | |
| 101 # } | |
| 102 tests_group = defaultdict(list) | |
| 103 for index, test in enumerate(failure['tests']): | |
| 104 test_name = test['test_name'] | |
| 105 found_group = False | |
| 106 revisions = set() | |
| 107 for cl in test['suspected_cls']: | |
| 108 revisions.add(cl['revision']) | |
| 109 for group in tests_group.values(): | |
| 110 if revisions == set(group['revisions']): | |
| 111 group['tests'].append(test_name) | |
| 112 found_group = True | |
| 113 break | |
| 114 if not found_group: | |
| 115 group_suspected_cls = [] | |
| 116 for revision in revisions: | |
| 117 group_suspected_cls.append( | |
| 118 failure['suspected_cls'][step_revisions_index[revision]]) | |
| 119 tests_group[index] = { | |
| 120 'tests': [test_name], | |
| 121 'revisions': list(revisions), | |
| 122 'suspected_cls': group_suspected_cls | |
| 123 } | |
| 124 | |
| 125 for index, group in tests_group.iteritems(): | |
| 126 test_result = failure['tests'][index] | |
| 127 shared_result = { | |
| 128 'first_failure': test_result['first_failure'], | |
| 129 'last_pass': test_result.get('last_pass'), | |
| 130 'supported': supported, | |
| 131 'tests': group['tests'], | |
| 132 'suspected_cls': group['suspected_cls'] | |
|
stgao
2016/03/25 23:10:51
Is it possible that two tests failed due to the sa
chanli
2016/03/25 23:44:07
If they failed due to the same revision, and faile
| |
| 133 } | |
| 134 organized_suspected_cls.append(shared_result) | |
| 135 | |
| 136 return organized_results | |
| 137 | |
| 138 | |
def _UpdateAnalysisResultWithTryJobInfo(
    organized_results, master_name, builder_name, build_number):
  """Reorganizes analysis result and try job result by step_name and culprit.

  Merges the heuristic results (already grouped by suspected CL) with try job
  results for the same build, matching them per test, and buckets each merged
  result by the try job status.

  Args:
    organized_results (dict): Output of
        _GetOrganizedAnalysisResultBySuspectedCL — step_name mapped to a list
        of grouped heuristic results.
    master_name (str): Master of the failed build.
    builder_name (str): Builder of the failed build.
    build_number (int): Number of the failed build.

  Returns:
    update_results (dict): A dict keyed by step name; each step holds a
    'results' dict whose category lists hold the merged results:

    {
      'step1': {
        'results': {
          # Results that contain both heuristic analysis results
          # and try job results.
          'determined_results': [
            {
              'try_job': {
                'ref_name': 'step1',
                'try_job_key': 'm/b/119',
                'status': wf_analysis_status.ANALYZED,
                'try_job_url': 'url/121',
                'try_job_build_number': 121,
              },
              'heuristic_analysis': {
                'suspected_cls': [
                  {
                    'build_number': 98,
                    'repo_name': 'chromium',
                    'revision': 'r98_1',
                    'commit_position': None,
                    'url': None,
                    'score': 5,
                    'hints': {
                      'added f98.cc (and it was in log)': 5,
                    },
                  }
                ]
              },
              'tests': ['test1', 'test2'],
              'first_failure': 98,
              'last_pass': 97,
              'supported': True
            }
          ],
          # Results for flaky tests.
          'flaky_results': [...],
          # Results for all the other conditions,
          # such as non-swarming tests or swarming rerun failed.
          'undetermined_results': [...]
        }
      }
    }
  """
  # Three levels of defaultdict: step_name -> 'results' -> category -> list.
  update_results = defaultdict(lambda: defaultdict(lambda: defaultdict(list)))

  if not organized_results:
    return update_results

  try_job_info = handlers_util.GetAllTryJobResults(
      master_name, builder_name, build_number)
  if not try_job_info:
    # No try job data at all: nothing to merge.
    return update_results

  for step_name, try_jobs in try_job_info.iteritems():
    try_jobs = try_jobs['try_jobs']
    step_heuristic_results = organized_results[step_name]
    step_updated_results = update_results[step_name]['results']

    # Finds out try job result index and heuristic result index for each test.
    # The sentinel key 'non-swarming' stands in for compile/non-swarming steps
    # which have no per-test breakdown. Missing indices default to 0.
    test_result_map = defaultdict(lambda: defaultdict(int))

    for index, try_job in enumerate(try_jobs):
      if not try_job.get('tests'):  # Compile or non-swarming.
        test_result_map['non-swarming']['try_job_index'] = index
        continue
      for test_name in try_job['tests']:
        test_result_map[test_name]['try_job_index'] = index
    for index, heuristic_result in enumerate(step_heuristic_results):
      if not heuristic_result.get('tests'):  # Compile or non-swarming.
        test_result_map['non-swarming']['heuristic_index'] = index
        continue
      for test_name in heuristic_result['tests']:
        test_result_map[test_name]['heuristic_index'] = index

    # Group tests based on indices: tests sharing the same
    # (try_job_index, heuristic_index) pair are reported together.
    indices_test_map = defaultdict(list)
    for test_name, indices in test_result_map.iteritems():
      indices_test_map[
          (indices['try_job_index'], indices['heuristic_index'])].append(
              test_name)

    for indices, tests in indices_test_map.iteritems():
      try_job_result = try_jobs[indices[0]]
      heuristic_result = step_heuristic_results[indices[1]]

      final_result = {
          'try_job': try_job_result,
          'heuristic_analysis': {
              'suspected_cls': heuristic_result['suspected_cls']
          },
          # The sentinel maps back to an empty test list for the output.
          'tests': tests if tests != ['non-swarming'] else [],
          'first_failure': heuristic_result['first_failure'],
          'last_pass': heuristic_result['last_pass'],
          'supported': heuristic_result['supported']
      }

      # Bucket by try job status: flaky, known no-try-job reasons
      # (undetermined), or a real try job result (determined).
      if try_job_result['status'] == handlers_util.FLAKY:
        step_updated_results['flaky_results'].append(final_result)
      elif try_job_result['status'] in NO_TRY_JOB_REASON_MAP.values():
        step_updated_results['undetermined_results'].append(final_result)
      else:
        step_updated_results['determined_results'].append(final_result)

  return update_results
| 252 | |
| 253 | |
| 49 class BuildFailure(BaseHandler): | 254 class BuildFailure(BaseHandler): |
| 50 PERMISSION_LEVEL = Permission.ANYONE | 255 PERMISSION_LEVEL = Permission.ANYONE |
| 51 | 256 |
| 52 def _ShowDebugInfo(self): | 257 def _ShowDebugInfo(self): |
| 53 # Show debug info only if the app is run locally during development, if the | 258 # Show debug info only if the app is run locally during development, if the |
| 54 # currently logged-in user is an admin, or if it is explicitly requested | 259 # currently logged-in user is an admin, or if it is explicitly requested |
| 55 # with parameter 'debug=1'. | 260 # with parameter 'debug=1'. |
| 56 return (os.environ['SERVER_SOFTWARE'].startswith('Development') or | 261 return (os.environ['SERVER_SOFTWARE'].startswith('Development') or |
| 57 users.is_current_user_admin() or self.request.get('debug') == '1') | 262 users.is_current_user_admin() or self.request.get('debug') == '1') |
| 58 | 263 |
| (...skipping 29 matching lines...) Expand all Loading... | |
| 88 # Only allow admin to force a re-run and set the build_completed. | 293 # Only allow admin to force a re-run and set the build_completed. |
| 89 force = (users.is_current_user_admin() and | 294 force = (users.is_current_user_admin() and |
| 90 self.request.get('force') == '1') | 295 self.request.get('force') == '1') |
| 91 build_completed = (users.is_current_user_admin() and | 296 build_completed = (users.is_current_user_admin() and |
| 92 self.request.get('build_completed') == '1') | 297 self.request.get('build_completed') == '1') |
| 93 analysis = build_failure_analysis_pipelines.ScheduleAnalysisIfNeeded( | 298 analysis = build_failure_analysis_pipelines.ScheduleAnalysisIfNeeded( |
| 94 master_name, builder_name, build_number, | 299 master_name, builder_name, build_number, |
| 95 build_completed=build_completed, force=force, | 300 build_completed=build_completed, force=force, |
| 96 queue_name=BUILD_FAILURE_ANALYSIS_TASKQUEUE) | 301 queue_name=BUILD_FAILURE_ANALYSIS_TASKQUEUE) |
| 97 | 302 |
| 303 organized_results = _GetOrganizedAnalysisResultBySuspectedCL( | |
| 304 analysis.result) | |
| 305 analysis_result = _UpdateAnalysisResultWithTryJobInfo( | |
| 306 organized_results, *build_info) | |
| 307 | |
| 98 data = { | 308 data = { |
| 99 'master_name': analysis.master_name, | 309 'master_name': analysis.master_name, |
| 100 'builder_name': analysis.builder_name, | 310 'builder_name': analysis.builder_name, |
| 101 'build_number': analysis.build_number, | 311 'build_number': analysis.build_number, |
| 102 'pipeline_status_path': analysis.pipeline_status_path, | 312 'pipeline_status_path': analysis.pipeline_status_path, |
| 103 'show_debug_info': self._ShowDebugInfo(), | 313 'show_debug_info': self._ShowDebugInfo(), |
| 104 'analysis_request_time': _FormatDatetime(analysis.request_time), | 314 'analysis_request_time': _FormatDatetime(analysis.request_time), |
| 105 'analysis_start_time': _FormatDatetime(analysis.start_time), | 315 'analysis_start_time': _FormatDatetime(analysis.start_time), |
| 106 'analysis_end_time': _FormatDatetime(analysis.end_time), | 316 'analysis_end_time': _FormatDatetime(analysis.end_time), |
| 107 'analysis_duration': analysis.duration, | 317 'analysis_duration': analysis.duration, |
| 108 'analysis_update_time': _FormatDatetime(analysis.updated_time), | 318 'analysis_update_time': _FormatDatetime(analysis.updated_time), |
| 109 'analysis_completed': analysis.completed, | 319 'analysis_completed': analysis.completed, |
| 110 'analysis_failed': analysis.failed, | 320 'analysis_failed': analysis.failed, |
| 111 'analysis_result': analysis.result, | 321 'analysis_result': analysis_result, |
| 112 'analysis_correct': analysis.correct, | 322 'analysis_correct': analysis.correct, |
| 113 'triage_history': _GetTriageHistory(analysis), | 323 'triage_history': _GetTriageHistory(analysis), |
| 114 'show_triage_help_button': self._ShowTriageHelpButton(), | 324 'show_triage_help_button': self._ShowTriageHelpButton(), |
| 115 } | 325 } |
| 116 | 326 |
| 117 return {'template': 'build_failure.html', 'data': data} | 327 return {'template': 'build_failure.html', 'data': data} |
| 118 | 328 |
  def HandlePost(self):  # pragma: no cover
    # POST is handled exactly like GET: both trigger/refresh the analysis
    # and render the build-failure page.
    return self.HandleGet()
| OLD | NEW |