Chromium Code Reviews| Index: appengine/findit/handlers/triage_analysis.py |
| diff --git a/appengine/findit/handlers/triage_analysis.py b/appengine/findit/handlers/triage_analysis.py |
| index 12cf4f5872e93e6265079376f36616866839b2a6..e341f55ca2b65da61bf46becb36ac97124bc19d6 100644 |
| --- a/appengine/findit/handlers/triage_analysis.py |
| +++ b/appengine/findit/handlers/triage_analysis.py |
| @@ -10,24 +10,95 @@ TODO: work on an automatic or semi-automatic way to triage analysis result. |
| import calendar |
| from datetime import datetime |
| - |
| -from google.appengine.api import users |
| -from google.appengine.ext import ndb |
| - |
| +from datetime import timedelta |
|
chanli
2016/06/13 20:21:04
nit: need an empty line below.
josiahk
2016/06/21 00:56:54
Done.
|
| from common.base_handler import BaseHandler |
| from common.base_handler import Permission |
| -from model.wf_analysis import WfAnalysis |
| from model import result_status |
| +from model.wf_analysis import WfAnalysis |
| from waterfall import buildbot |
|
chanli
2016/06/13 20:21:04
Nit: move ln 14 - 18 under ln 22. And also leave 2 blank lines in between.
josiahk
2016/06/21 00:56:54
Done.
|
| +from google.appengine.api import users |
| +from google.appengine.ext import ndb |
|
stgao
2016/06/13 23:39:49
for imports, there is an order for different groups (standard library, third-party, then local imports, separated by blank lines).
josiahk
2016/06/21 00:56:53
Thanks!
|
| -@ndb.transactional |
| -def _UpdateAnalysisResultStatus( |
| - master_name, builder_name, build_number, correct, user_name=None): |
| - analysis = WfAnalysis.Get(master_name, builder_name, build_number) |
| - if not analysis or not analysis.completed: |
| + |
# Day offsets (relative to "today" in UTC) bounding the query window used by
# _TriageDuplicateResults when searching for potentially duplicate analyses.
# START is the older edge, END the newer edge, of the window of whole UTC days.
MATCHING_ANALYSIS_DAYS_AGO_START = 1
MATCHING_ANALYSIS_DAYS_AGO_END = 1
| + |
| + |
| +def _GenPotentialCulpritTupleList(analysis): |
| + """Generates a list of potential culprit tuples. |
| + |
| + Args: |
| + analysis: the analysis from which to generate a potenial culript tuple list. |
| + |
| + Returns: |
| + A list of cultprit tuples that each could look like: |
| + |
| + (step_name, test_name, revision) |
| + |
| + or could look like: |
| + |
| + (step_name, revision) |
| + """ |
| + potential_culprit_tuple_list = [] |
| + |
| + # Iterates through the failures, tests, and suspected_cls, appending potential |
| + # (step_name, test_name, revision) and (step_name, revision) culprit tuples to |
| + # the list. |
| + for failure in analysis.result['failures']: |
| + if failure.get('tests'): |
| + for test in failure['tests']: |
| + for suspected_cl in test.get('suspected_cls', []): |
| + potential_culprit_tuple_list.append(( |
| + failure['step_name'], |
| + test['test_name'], |
| + suspected_cl['revision'])) |
| + else: |
| + for suspected_cl in failure['suspected_cls']: |
| + potential_culprit_tuple_list.append(( |
| + failure['step_name'], |
| + suspected_cl['revision'])) |
| + |
| + return potential_culprit_tuple_list |
| + |
| + |
def _DoAnalysesMatch(analysis_1, analysis_2):
  """Checks whether two analyses point at the same potential culprits.

  Args:
    analysis_1: The first analysis to compare.
    analysis_2: The second analysis to compare.

  Returns:
    True if both analyses have non-empty potential culprit lists and those
    lists contain the same tuples (order-insensitive), otherwise False.
  """
  culprits_1 = _GenPotentialCulpritTupleList(analysis_1)
  culprits_2 = _GenPotentialCulpritTupleList(analysis_2)

  # An analysis with no potential culprits can never be considered a match.
  if not culprits_1 or not culprits_2:
    return False

  # Compare the culprit lists order-insensitively.
  return sorted(culprits_1) == sorted(culprits_2)
| + |
| + |
| +def _AppendTriageHistoryRecord(analysis, correct, user_name): |
| + """Appends a triage history record to the given analysis. |
| + |
| + Args: |
| + analysis: The analysis to which to append the history record |
|
lijeffrey
2016/06/13 18:15:06
. after sentences.
josiahk
2016/06/21 00:56:54
Done.
|
| + correct: True if the history record should indicate a correct judgement, |
| + otherwise False. |
| + user_name: The user_name of the person to include in the triage record |
| + """ |
| if correct: |
| if analysis.suspected_cls: |
| analysis.result_status = result_status.FOUND_CORRECT |
| @@ -53,6 +124,45 @@ def _UpdateAnalysisResultStatus( |
| analysis.triage_history.append(triage_record) |
| analysis.put() |
| + |
| + |
@ndb.transactional
def _UpdateAnalysisResultStatus(
    master_name, builder_name, build_number, correct, user_name=None):
  """Records the user's triage judgement on the identified analysis.

  Args:
    master_name: Name of the master of the analyzed build.
    builder_name: Name of the builder of the analyzed build.
    build_number: Number of the analyzed build.
    correct: True if the analysis result was judged correct, otherwise False.
    user_name: The user name of the person triaging, recorded in the
        triage history.

  Returns:
    False if no completed analysis exists for the build; otherwise the tuple
    (True, analysis) where analysis is the updated WfAnalysis entity.
  """
  triaged_analysis = WfAnalysis.Get(master_name, builder_name, build_number)

  # Only completed analyses can be triaged.
  if not triaged_analysis or not triaged_analysis.completed:
    return False

  _AppendTriageHistoryRecord(triaged_analysis, correct, user_name)
  return True, triaged_analysis
| + |
# TODO: Investigate whether this should run under @ndb.transactional.
def _TriageDuplicateResults(original_analysis, correct, user_name=None):
  """Applies the same triage judgement to duplicates of a triaged analysis.

  Queries for recent untriaged analyses whose potential culprit lists match
  the original analysis' and appends the same triage history record to each.

  Args:
    original_analysis: The analysis that was just triaged by the user.
    correct: True if the triage judgement marked the result correct,
        otherwise False.
    user_name: The user name of the person triaging, recorded in the
        duplicates' triage history.

  Returns:
    True. Matching is best-effort; finding no duplicates is not an error.
  """
  # TODO: Consider anchoring this window on original_analysis.build_start_time
  # (per review) instead of the current time.
  # The window spans whole UTC days: from the start of the day
  # MATCHING_ANALYSIS_DAYS_AGO_START days ago (inclusive) up to the start of
  # the day (MATCHING_ANALYSIS_DAYS_AGO_END - 1) days ago (exclusive).
  start_date = datetime.utcnow() - timedelta(MATCHING_ANALYSIS_DAYS_AGO_START)
  start_date = start_date.replace(hour=0, minute=0, second=0, microsecond=0)
  end_date = datetime.utcnow() - timedelta(MATCHING_ANALYSIS_DAYS_AGO_END - 1)
  end_date = end_date.replace(hour=0, minute=0, second=0, microsecond=0)

  analysis_results = WfAnalysis.query(ndb.AND(
      WfAnalysis.build_start_time >= start_date,
      WfAnalysis.build_start_time < end_date,
      WfAnalysis.result_status == result_status.FOUND_UNTRIAGED
  )).fetch()

  # Keep only analyses that match the original's potential culprits, excluding
  # the original analysis itself. Key equality must use != rather than
  # identity ("is not"): two ndb Key objects for the same entity compare equal
  # but are distinct objects, so "is not" would never exclude the original.
  matching_analyses = [
      other_analysis for other_analysis in analysis_results
      if (_DoAnalysesMatch(original_analysis, other_analysis) and
          original_analysis.key != other_analysis.key)]

  for matching_analysis in matching_analyses:
    _AppendTriageHistoryRecord(matching_analysis, correct, user_name)

  return True
| @@ -78,6 +188,8 @@ class TriageAnalysis(BaseHandler): |
| # As the permission level is CORP_USER, we could assume the current user |
| # already logged in. |
| user_name = users.get_current_user().email().split('@')[0] |
| - success = _UpdateAnalysisResultStatus( |
| + success_and_maybe_analysis = _UpdateAnalysisResultStatus( |
| master_name, builder_name, build_number, correct, user_name) |
| + success = not not success_and_maybe_analysis and _TriageDuplicateResults( |
|
lijeffrey
2016/06/13 18:15:06
not not shouldn't be necessary
chanli
2016/06/13 20:21:04
Based your current change, how could you display t
josiahk
2016/06/21 00:56:54
Done.
josiahk
2016/06/21 00:56:54
The original analysis link information will hopefu
|
| + success_and_maybe_analysis[1], correct, user_name) |
| return {'data': {'success': success}} |