Chromium Code Reviews| OLD | NEW |
|---|---|
| 1 # Copyright 2014 The Chromium Authors. All rights reserved. | 1 # Copyright 2014 The Chromium Authors. All rights reserved. |
| 2 # Use of this source code is governed by a BSD-style license that can be | 2 # Use of this source code is governed by a BSD-style license that can be |
| 3 # found in the LICENSE file. | 3 # found in the LICENSE file. |
| 4 | 4 |
| 5 """This module is to handle manual triage of analysis result. | 5 """This module is to handle manual triage of analysis result. |
| 6 | 6 |
| 7 This handler will flag the analysis result as correct or incorrect. | 7 This handler will flag the analysis result as correct or incorrect. |
| 8 TODO: work on an automatic or semi-automatic way to triage analysis result. | 8 TODO: work on an automatic or semi-automatic way to triage analysis result. |
| 9 """ | 9 """ |
| 10 | 10 |
| 11 import calendar | 11 import calendar |
| 12 from datetime import datetime | 12 from datetime import datetime |
| 13 from datetime import timedelta | |
| 13 | 14 |
| 14 from google.appengine.api import users | 15 from google.appengine.api import users |
| 15 from google.appengine.ext import ndb | 16 from google.appengine.ext import ndb |
| 17 import pytz.gae | |
| 16 | 18 |
| 17 from common.base_handler import BaseHandler | 19 from common.base_handler import BaseHandler |
| 18 from common.base_handler import Permission | 20 from common.base_handler import Permission |
| 21 from model import result_status | |
| 19 from model.wf_analysis import WfAnalysis | 22 from model.wf_analysis import WfAnalysis |
| 20 from model import result_status | |
| 21 from waterfall import buildbot | 23 from waterfall import buildbot |
| 22 | 24 |
| 23 | 25 |
| 24 @ndb.transactional | 26 MATCHING_ANALYSIS_HOURS_AGO_START = 24 |
| 25 def _UpdateAnalysisResultStatus( | 27 MATCHING_ANALYSIS_HOURS_AGO_END = 24 |
| 26 master_name, builder_name, build_number, correct, user_name=None): | 28 |
| 27 analysis = WfAnalysis.Get(master_name, builder_name, build_number) | 29 MATCHING_ANALYSIS_END_BOUND_TIME_ZONE = "US/Pacific" |
| 28 if not analysis or not analysis.completed: | 30 |
| 31 def _GenPotentialCulpritTupleList(analysis): | |
| 32 """Generates a list of potential culprit tuples. | |
| 33 | |
| 34 Args: | |
| 35 analysis: the analysis from which to generate a potential culprit tuple list. | |
| 36 | |
| 37 Returns: | |
| 38 A list of culprit tuples that each could look like: | |
| 39 | |
| 40 (step_name, test_name, revision) | |
| 41 | |
| 42 or could look like: | |
| 43 | |
| 44 (step_name, revision) | |
| 45 """ | |
| 46 potential_culprit_tuple_list = [] | |
| 47 | |
| 48 # Iterates through the failures, tests, and suspected_cls, appending potential | |
| 49 # (step_name, test_name, revision) and (step_name, revision) culprit tuples to | |
| 50 # the list. | |
| 51 for failure in analysis.result['failures']: | |
| 52 if failure.get('tests'): | |
| 53 for test in failure['tests']: | |
| 54 for suspected_cl in test.get('suspected_cls', []): | |
| 55 potential_culprit_tuple_list.append(( | |
| 56 failure['step_name'], | |
| 57 test['test_name'], | |
| 58 suspected_cl['revision'])) | |
| 59 else: | |
| 60 for suspected_cl in failure['suspected_cls']: | |
| 61 potential_culprit_tuple_list.append(( | |
| 62 failure['step_name'], | |
| 63 suspected_cl['revision'])) | |
| 64 | |
| 65 return potential_culprit_tuple_list | |
| 66 | |
| 67 | |
| 68 def _DoAnalysesMatch(analysis_1, analysis_2): | |
| 69 """Checks if two analyses match. | |
| 70 | |
| 71 Args: | |
| 72 analysis_1: The first analysis to compare. | |
| 73 analysis_2: The second analysis to compare. | |
| 74 | |
| 75 Returns: | |
| 76 True if the two analyses' sorted potential culprit lists match, otherwise | |
| 77 False. | |
| 78 """ | |
| 79 | |
| 80 # Get list of potential culprit tuples. | |
| 81 potential_culprit_tuple_list_1 = _GenPotentialCulpritTupleList(analysis_1) | |
| 82 potential_culprit_tuple_list_2 = _GenPotentialCulpritTupleList(analysis_2) | |
| 83 | |
| 84 # Both analyses must have non-empty potential culprit lists. | |
| 85 if not potential_culprit_tuple_list_1 or not potential_culprit_tuple_list_2: | |
| 29 return False | 86 return False |
| 30 | 87 |
| 31 if correct: | 88 # Both analyses must have matching potential culprit lists. |
| 89 return (sorted(potential_culprit_tuple_list_1) == | |
| 90 sorted(potential_culprit_tuple_list_2)) | |
| 91 | |
| 92 | |
| 93 def _AppendTriageHistoryRecord(analysis, is_correct, user_name): | |
| 94 """Appends a triage history record to the given analysis. | |
| 95 | |
| 96 Args: | |
| 97 analysis: The analysis to which to append the history record. | |
| 98 is_correct: True if the history record should indicate a correct judgement, | |
| 99 otherwise False. | |
| 100 user_name: The user_name of the person to include in the triage record. | |
| 101 """ | |
| 102 if is_correct: | |
| 32 if analysis.suspected_cls: | 103 if analysis.suspected_cls: |
| 33 analysis.result_status = result_status.FOUND_CORRECT | 104 analysis.result_status = result_status.FOUND_CORRECT |
| 34 analysis.culprit_cls = analysis.suspected_cls | 105 analysis.culprit_cls = analysis.suspected_cls |
| 35 else: | 106 else: |
| 36 analysis.result_status = result_status.NOT_FOUND_CORRECT | 107 analysis.result_status = result_status.NOT_FOUND_CORRECT |
| 37 analysis.culprit_cls = None | 108 analysis.culprit_cls = None |
| 38 else: | 109 else: |
| 39 analysis.culprit_cls = None | 110 analysis.culprit_cls = None |
| 40 if analysis.suspected_cls: | 111 if analysis.suspected_cls: |
| 41 analysis.result_status = result_status.FOUND_INCORRECT | 112 analysis.result_status = result_status.FOUND_INCORRECT |
| 42 else: | 113 else: |
| 43 analysis.result_status = result_status.NOT_FOUND_INCORRECT | 114 analysis.result_status = result_status.NOT_FOUND_INCORRECT |
| 44 | 115 |
| 45 triage_record = { | 116 triage_record = { |
| 46 'triage_timestamp': calendar.timegm(datetime.utcnow().timetuple()), | 117 'triage_timestamp': calendar.timegm(datetime.utcnow().timetuple()), |
| 47 'user_name': user_name, | 118 'user_name': user_name, |
| 48 'result_status': analysis.result_status, | 119 'result_status': analysis.result_status, |
| 49 'version': analysis.version, | 120 'version': analysis.version, |
| 50 } | 121 } |
| 51 if not analysis.triage_history: | 122 if not analysis.triage_history: |
| 52 analysis.triage_history = [] | 123 analysis.triage_history = [] |
| 53 analysis.triage_history.append(triage_record) | 124 analysis.triage_history.append(triage_record) |
| 54 | 125 |
| 55 analysis.put() | 126 analysis.put() |
| 56 return True | 127 |
| 128 | |
| 129 @ndb.transactional | |
|
stgao
2016/06/24 01:07:23
This looks good to me.
| |
| 130 def _UpdateAnalysisResultStatus( | |
| 131 master_name, builder_name, build_number, is_correct, user_name=None): | |
| 132 analysis = WfAnalysis.Get(master_name, builder_name, build_number) | |
| 133 if not analysis or not analysis.completed: | |
| 134 return False, None | |
| 135 | |
| 136 _AppendTriageHistoryRecord(analysis, is_correct, user_name) | |
| 137 | |
| 138 return True, analysis | |
| 139 | |
| 140 | |
| 141 def _GetDuplicateAnalyses(original_analysis): | |
| 142 start_time = (original_analysis.build_start_time - | |
| 143 timedelta(hours=MATCHING_ANALYSIS_HOURS_AGO_START)) | |
| 144 end_time = (original_analysis.build_start_time + | |
| 145 timedelta(hours=MATCHING_ANALYSIS_HOURS_AGO_END)) | |
| 146 | |
| 147 # Don't count any analyses from today (except for exactly at midnight PST). | |
| 148 # Get current time (UTC) | |
| 149 current_time_as_utc = pytz.utc.localize(datetime.utcnow()) | |
|
stgao
2016/06/24 01:07:23
Just curious about the context on making the limit
lijeffrey
2016/06/24 01:30:06
isn't current_time_as_utc just datetime.utcnow()?
chanli
2016/06/24 16:51:56
This is to copy what we're doing right now when we
josiahk
2016/06/24 18:12:39
No, because datetime.utcnow() doesn't contain time
| |
| 150 | |
| 151 # Convert to pacific time | |
|
lijeffrey
2016/06/24 01:30:06
nit: comments end with .
josiahk
2016/06/24 18:12:39
Done.
| |
| 152 current_time_as_pacific = current_time_as_utc.astimezone( | |
| 153 pytz.timezone(MATCHING_ANALYSIS_END_BOUND_TIME_ZONE)) | |
|
lijeffrey
2016/06/24 01:30:06
is there a reason we're using pst and not utc?
chanli
2016/06/24 16:51:56
This is to copy what we're doing right now when we
| |
| 154 | |
| 155 # Set hours and minutes to 0 to get midnight | |
| 156 pacific_midnight_as_pacific = current_time_as_pacific.replace( | |
| 157 hour=0, minute=0, second=0, microsecond=0) | |
| 158 | |
| 159 # Convert back to UTC time | |
| 160 pacific_midnight_as_utc = pacific_midnight_as_pacific.astimezone(pytz.utc) | |
| 161 | |
| 162 # Strip timezone | |
| 163 pacific_midnight = pacific_midnight_as_utc.replace(tzinfo=None) | |
| 164 | |
| 165 if end_time > pacific_midnight: | |
| 166 end_time = pacific_midnight | |
| 167 | |
| 168 # Retrieve potential duplicate build analyses. | |
| 169 analysis_results = WfAnalysis.query(ndb.AND( | |
| 170 WfAnalysis.build_start_time >= start_time, | |
| 171 WfAnalysis.build_start_time <= end_time, | |
| 172 WfAnalysis.result_status == result_status.FOUND_UNTRIAGED | |
| 173 )).fetch() | |
| 174 | |
| 175 # Further filter potential duplicates and return them. | |
| 176 return [analysis for analysis in analysis_results if | |
| 177 _DoAnalysesMatch(original_analysis, analysis) and | |
| 178 original_analysis.key is not analysis.key and | |
| 179 analysis.completed] | |
| 180 | |
| 181 | |
| 182 def _TriageDuplicateResults(original_analysis, is_correct, user_name=None): | |
| 183 matching_analyses = _GetDuplicateAnalyses(original_analysis) | |
| 184 | |
| 185 for analysis in matching_analyses: | |
| 186 _AppendTriageHistoryRecord(analysis, is_correct, user_name) | |
| 57 | 187 |
| 58 | 188 |
| 59 class TriageAnalysis(BaseHandler): | 189 class TriageAnalysis(BaseHandler): |
| 60 PERMISSION_LEVEL = Permission.CORP_USER | 190 PERMISSION_LEVEL = Permission.CORP_USER |
| 61 | 191 |
| 62 def HandleGet(self): # pragma: no cover | 192 def HandleGet(self): # pragma: no cover |
| 63 return self.HandlePost() | 193 return self.HandlePost() |
| 64 | 194 |
| 65 def HandlePost(self): | 195 def HandlePost(self): |
| 66 """Sets the manual triage result for the analysis. | 196 """Sets the manual triage result for the analysis. |
| 67 | 197 |
| 68 Mark the analysis result as correct/wrong/etc. | 198 Mark the analysis result as correct/wrong/etc. |
| 69 TODO: make it possible to set the real culprit CLs. | 199 TODO: make it possible to set the real culprit CLs. |
| 70 """ | 200 """ |
| 71 url = self.request.get('url').strip() | 201 url = self.request.get('url').strip() |
| 72 build_info = buildbot.ParseBuildUrl(url) | 202 build_info = buildbot.ParseBuildUrl(url) |
| 73 if not build_info: | 203 if not build_info: |
| 74 return {'data': {'success': False}} | 204 return {'data': {'success': False}} |
| 75 master_name, builder_name, build_number = build_info | 205 master_name, builder_name, build_number = build_info |
| 76 | 206 |
| 77 correct = self.request.get('correct').lower() == 'true' | 207 is_correct = self.request.get('correct').lower() == 'true' |
| 78 # As the permission level is CORP_USER, we could assume the current user | 208 # As the permission level is CORP_USER, we could assume the current user |
| 79 # already logged in. | 209 # already logged in. |
| 80 user_name = users.get_current_user().email().split('@')[0] | 210 user_name = users.get_current_user().email().split('@')[0] |
| 81 success = _UpdateAnalysisResultStatus( | 211 success, original_analysis = _UpdateAnalysisResultStatus( |
| 82 master_name, builder_name, build_number, correct, user_name) | 212 master_name, builder_name, build_number, is_correct, user_name) |
| 213 if success: | |
| 214 _TriageDuplicateResults(original_analysis, is_correct, user_name) | |
| 83 return {'data': {'success': success}} | 215 return {'data': {'success': success}} |
| OLD | NEW |