Chromium Code Reviews| OLD | NEW |
|---|---|
| 1 # Copyright 2014 The Chromium Authors. All rights reserved. | 1 # Copyright 2014 The Chromium Authors. All rights reserved. |
| 2 # Use of this source code is governed by a BSD-style license that can be | 2 # Use of this source code is governed by a BSD-style license that can be |
| 3 # found in the LICENSE file. | 3 # found in the LICENSE file. |
| 4 | 4 |
| 5 """This module is to handle manual triage of analysis result. | 5 """This module is to handle manual triage of analysis result. |
| 6 | 6 |
| 7 This handler will flag the analysis result as correct or incorrect. | 7 This handler will flag the analysis result as correct or incorrect. |
| 8 TODO: work on an automatic or semi-automatic way to triage analysis result. | 8 TODO: work on an automatic or semi-automatic way to triage analysis result. |
| 9 """ | 9 """ |
| 10 | 10 |
| 11 import calendar | 11 import calendar |
| 12 from datetime import datetime | 12 from datetime import datetime |
| 13 from datetime import timedelta | |
| 13 | 14 |
| 14 from google.appengine.api import users | 15 from google.appengine.api import users |
| 15 from google.appengine.ext import ndb | 16 from google.appengine.ext import ndb |
| 16 | 17 |
| 17 from common.base_handler import BaseHandler | 18 from common.base_handler import BaseHandler |
| 18 from common.base_handler import Permission | 19 from common.base_handler import Permission |
| 20 from model import result_status | |
| 19 from model.wf_analysis import WfAnalysis | 21 from model.wf_analysis import WfAnalysis |
| 20 from model import result_status | |
| 21 from waterfall import buildbot | 22 from waterfall import buildbot |
| 22 | 23 |
| 23 | 24 |
# Size of the time window, in hours, around an analysis' build_start_time that
# is searched for matching (duplicate) analyses: the window starts
# MATCHING_ANALYSIS_HOURS_AGO_START hours before and ends
# MATCHING_ANALYSIS_HOURS_AGO_END hours after build_start_time.
MATCHING_ANALYSIS_HOURS_AGO_START = 24
MATCHING_ANALYSIS_HOURS_AGO_END = 24
| 26 master_name, builder_name, build_number, correct, user_name=None): | 27 |
| 27 analysis = WfAnalysis.Get(master_name, builder_name, build_number) | 28 |
| 28 if not analysis or not analysis.completed: | 29 def _GenPotentialCulpritTupleList(analysis): |
| 30 """Generates a list of potential culprit tuples. | |
| 31 | |
| 32 Args: | |
| 33 analysis: the analysis from which to generate a potenial culript tuple list. | |
| 34 | |
| 35 Returns: | |
| 36 A list of cultprit tuples that each could look like: | |
| 37 | |
| 38 (step_name, test_name, revision) | |
| 39 | |
| 40 or could look like: | |
| 41 | |
| 42 (step_name, revision) | |
| 43 """ | |
| 44 potential_culprit_tuple_list = [] | |
| 45 | |
| 46 # Iterates through the failures, tests, and suspected_cls, appending potential | |
| 47 # (step_name, test_name, revision) and (step_name, revision) culprit tuples to | |
| 48 # the list. | |
| 49 for failure in analysis.result['failures']: | |
| 50 if failure.get('tests'): | |
| 51 for test in failure['tests']: | |
| 52 for suspected_cl in test.get('suspected_cls', []): | |
| 53 potential_culprit_tuple_list.append(( | |
| 54 failure['step_name'], | |
| 55 test['test_name'], | |
| 56 suspected_cl['revision'])) | |
| 57 else: | |
| 58 for suspected_cl in failure['suspected_cls']: | |
| 59 potential_culprit_tuple_list.append(( | |
| 60 failure['step_name'], | |
| 61 suspected_cl['revision'])) | |
| 62 | |
| 63 return potential_culprit_tuple_list | |
| 64 | |
| 65 | |
def _DoAnalysesMatch(analysis_1, analysis_2):
  """Determines whether two analyses point at the same potential culprits.

  Args:
    analysis_1: The first analysis to compare.
    analysis_2: The second analysis to compare.

  Returns:
    True if both analyses have non-empty potential culprit lists that are
    equal when sorted, otherwise False.
  """
  culprits_1 = _GenPotentialCulpritTupleList(analysis_1)
  culprits_2 = _GenPotentialCulpritTupleList(analysis_2)

  # An empty potential culprit list on either side can never count as a
  # match; otherwise compare the lists order-insensitively.
  if culprits_1 and culprits_2:
    return sorted(culprits_1) == sorted(culprits_2)
  return False
| 89 | |
| 90 | |
def _AppendTriageHistoryRecord(analysis, is_correct, user_name):
  """Appends a triage history record to the given analysis and saves it.

  Updates the analysis' result_status and culprit_cls according to the triage
  judgement, appends a record of the judgement to triage_history, and puts
  the analysis.

  Args:
    analysis: The analysis to which to append the history record.
    is_correct: True if the history record should indicate a correct
        judgement, otherwise False.
    user_name: The user_name of the person to include in the triage record.
  """
  if is_correct:
    if analysis.suspected_cls:
      analysis.result_status = result_status.FOUND_CORRECT
      analysis.culprit_cls = analysis.suspected_cls
    else:
      analysis.result_status = result_status.NOT_FOUND_CORRECT
      analysis.culprit_cls = None
  else:
    # An incorrect judgement always clears the culprit CLs.
    analysis.culprit_cls = None
    if analysis.suspected_cls:
      analysis.result_status = result_status.FOUND_INCORRECT
    else:
      analysis.result_status = result_status.NOT_FOUND_INCORRECT

  triage_record = {
      'triage_timestamp': calendar.timegm(datetime.utcnow().timetuple()),
      'user_name': user_name,
      'result_status': analysis.result_status,
      'version': analysis.version,
  }
  if not analysis.triage_history:
    analysis.triage_history = []
  analysis.triage_history.append(triage_record)

  analysis.put()
| 56 return True | 125 |
| 126 | |
@ndb.transactional
def _UpdateAnalysisResultStatus(
    master_name, builder_name, build_number, is_correct, user_name=None):
  """Transactionally records a triage judgement on a completed analysis.

  Args:
    master_name: Name of the master identifying the build.
    builder_name: Name of the builder identifying the build.
    build_number: Number of the build.
    is_correct: True if the analysis result is judged correct.
    user_name: Name of the triaging user, recorded in the history entry.

  Returns:
    A (success, analysis) pair: (False, None) when no completed analysis
    exists for the build, otherwise (True, the updated analysis).
  """
  analysis = WfAnalysis.Get(master_name, builder_name, build_number)
  if not (analysis and analysis.completed):
    return False, None

  _AppendTriageHistoryRecord(analysis, is_correct, user_name)
  return True, analysis
| 137 | |
| 138 | |
def _GetDuplicateAnalyses(original_analysis):
  """Finds untriaged, completed analyses whose culprits match the original.

  Queries for analyses whose build_start_time falls within a window around
  the original analysis' build_start_time and whose result_status is
  FOUND_UNTRIAGED, then keeps only those that are completed, are not the
  original analysis itself, and have matching potential culprit lists.

  Args:
    original_analysis: The analysis against which to find duplicates.

  Returns:
    A list of matching WfAnalysis entities, excluding original_analysis.
  """
  start_time = (original_analysis.build_start_time -
                timedelta(hours=MATCHING_ANALYSIS_HOURS_AGO_START))
  end_time = (original_analysis.build_start_time +
              timedelta(hours=MATCHING_ANALYSIS_HOURS_AGO_END))

  # Don't count any analyses from today (except for exactly at midnight).
  # NOTE(review): midnight is computed in UTC here; the code review asked for
  # local (PST) midnight -- confirm which is intended.
  midnight = datetime.utcnow().replace(
      hour=0, minute=0, second=0, microsecond=0)
  if end_time > midnight:
    end_time = midnight

  # Retrieve potential duplicate build analyses.
  analysis_results = WfAnalysis.query(ndb.AND(
      WfAnalysis.build_start_time >= start_time,
      WfAnalysis.build_start_time <= end_time,
      WfAnalysis.result_status == result_status.FOUND_UNTRIAGED
  )).fetch()

  # Further filter potential duplicates and return them. Keys are compared
  # with != rather than 'is not': ndb Keys define equality by value, and two
  # distinct Key objects can refer to the same entity, so an identity check
  # would let the original analysis match itself. Cheap checks run before
  # the culprit-list comparison.
  return [analysis for analysis in analysis_results
          if analysis.completed and
          analysis.key != original_analysis.key and
          _DoAnalysesMatch(original_analysis, analysis)]
| 163 | |
| 164 | |
def _TriageDuplicateResults(original_analysis, is_correct, user_name=None):
  """Applies the same triage judgement to all duplicates of an analysis.

  Args:
    original_analysis: The analysis that was just triaged by the user.
    is_correct: True if the analysis result was judged correct.
    user_name: Name of the user, recorded in each triage history entry.
  """
  for duplicate in _GetDuplicateAnalyses(original_analysis):
    _AppendTriageHistoryRecord(duplicate, is_correct, user_name)
| 57 | 170 |
| 58 | 171 |
class TriageAnalysis(BaseHandler):
  """Handler that records a manual triage judgement of an analysis result."""

  PERMISSION_LEVEL = Permission.CORP_USER

  def HandleGet(self):  # pragma: no cover
    return self.HandlePost()

  def HandlePost(self):
    """Sets the manual triage result for the analysis.

    Mark the analysis result as correct/wrong/etc.
    TODO: make it possible to set the real culprit CLs.
    """
    build_info = buildbot.ParseBuildUrl(self.request.get('url').strip())
    if not build_info:
      return {'data': {'success': False}}

    master_name, builder_name, build_number = build_info
    is_correct = self.request.get('correct').lower() == 'true'
    # As the permission level is CORP_USER, we could assume the current user
    # already logged in.
    user_name = users.get_current_user().email().split('@')[0]

    success, original_analysis = _UpdateAnalysisResultStatus(
        master_name, builder_name, build_number, is_correct, user_name)
    if success:
      _TriageDuplicateResults(original_analysis, is_correct, user_name)
    return {'data': {'success': success}}
| OLD | NEW |