Chromium Code Reviews| Index: appengine/findit/handlers/triage_analysis.py |
| diff --git a/appengine/findit/handlers/triage_analysis.py b/appengine/findit/handlers/triage_analysis.py |
| index 12cf4f5872e93e6265079376f36616866839b2a6..1aaf4e3e9a9d7462c6ee854558d537d4bd722e17 100644 |
| --- a/appengine/findit/handlers/triage_analysis.py |
| +++ b/appengine/findit/handlers/triage_analysis.py |
| @@ -9,7 +9,7 @@ TODO: work on an automatic or semi-automatic way to triage analysis result. |
| """ |
| import calendar |
| -from datetime import datetime |
| +from datetime import datetime, timedelta |
|
chanli
2016/06/02 00:55:31
Nit: imports should be on separate lines, please.
josiahk
2016/06/07 18:34:41
Done.
|
| from google.appengine.api import users |
| from google.appengine.ext import ndb |
| @@ -20,39 +20,205 @@ from model.wf_analysis import WfAnalysis |
| from model import result_status |
| from waterfall import buildbot |
| +# TODO: Is there a better place to put this constant? |
|
chanli
2016/06/02 00:55:31
I think we can just keep this constant here.
josiahk
2016/06/07 18:34:41
Done.
|
| +# TODO: Change this constant to 1 for production. 7 is helpful to get older |
| +# results. |
|
chanli
2016/06/02 00:55:31
Please remove these TODOs before you commit this change.
josiahk
2016/06/10 18:34:26
Done.
|
# How many days back to scan for analyses whose culprits match the one
# being triaged.  7 is helpful during development to pick up older results;
# TODO: reduce to 1 for production.
MATCHING_ANALYSIS_DAYS_AGO = 7
|
stgao
2016/06/03 07:09:34
Leaving it here for this CL is fine. But seems bet
josiahk
2016/06/10 18:34:26
Acknowledged.
|
| + |
def genPotentialCulpritTupleList(analysis):
  """Extracts the potential culprit tuples from an analysis result.

  Iterates over the failures in analysis.result and collects, for every
  suspected CL, either a (step_name, test_name, revision) tuple (for
  test-level suspects) or a (step_name, revision) tuple (for step-level
  suspects).

  When a failure has a 'tests' entry, only the test-level suspected CLs are
  collected; the step-level 'suspected_cls' of such a failure is an
  aggregate of the test-level ones, so counting both would double-count
  culprits (confirmed in review: fixed with 'else').

  Args:
    analysis: a WfAnalysis-like object whose .result dict contains a
        'failures' list.

  Returns:
    A list of culprit tuples (possibly empty).
  """
  potential_culprit_tuples = []
  for failure in analysis.result['failures']:
    step_name = failure['step_name']
    if 'tests' in failure:
      for test in failure['tests']:
        for suspected_cl in test.get('suspected_cls', []):
          potential_culprit_tuples.append(
              (step_name, test['test_name'], suspected_cl['revision']))
    else:
      for suspected_cl in failure.get('suspected_cls', []):
        potential_culprit_tuples.append((step_name, suspected_cl['revision']))

  return potential_culprit_tuples
| + |
def doAnalysesMatch(analysis1, analysis2):
  """Returns True if two analyses implicate the same set of culprits.

  Two analyses match when both have a non-empty result with failures, both
  yield at least one potential culprit tuple, and the two culprit tuple
  lists are equal ignoring order.

  Args:
    analysis1: a WfAnalysis-like object with a .result dict (or None).
    analysis2: a WfAnalysis-like object with a .result dict (or None).

  Returns:
    True if the analyses' potential culprits match, False otherwise.
  """
  # Both analyses must have a result with a non-empty 'failures' list.
  # Use .get so a result missing the 'failures' key does not raise KeyError.
  if not analysis1.result or not analysis2.result:
    return False
  if (not analysis1.result.get('failures') or
      not analysis2.result.get('failures')):
    return False

  culprits1 = genPotentialCulpritTupleList(analysis1)
  culprits2 = genPotentialCulpritTupleList(analysis2)

  # Both analyses must implicate at least one culprit; analyses with no
  # suspects are not considered a match even with each other.
  if not culprits1 or not culprits2:
    return False

  # Matching is order-insensitive.
  return sorted(culprits1) == sorted(culprits2)
| + |
def doAnalysesMatchTests():
  """Self-checks doAnalysesMatch against captured real analysis results.

  NOTE(review): development-time sanity check; reviewers asked for this to
  live in ./test/triage_analysis_test.py instead of the handler module.
  """
  analysis1 = WfAnalysis()
  analysis1.result = {u'failures': [
      {u'first_failure': 49103,
       u'step_name': u'content_unittests on Windows-7-SP1',
       u'last_pass': 49102, u'supported': True, u'suspected_cls': []},
      {u'first_failure': 49103,
       u'step_name': u'content_browsertests on Windows-7-SP1',
       u'last_pass': 49102, u'supported': True, u'suspected_cls': []},
      {u'first_failure': 49103,
       u'step_name': u'browser_tests on Windows-7-SP1',
       u'last_pass': 49102, u'supported': True, u'suspected_cls': []}]}

  analysis2 = WfAnalysis()
  analysis2.result = {u'failures': [
      {u'first_failure': 1684,
       u'step_name': u'interactive_ui_tests',
       u'suspected_cls': [
           {u'repo_name': u'chromium',
            u'hints': {
                u'modified site_per_process_interactive_browsertest.cc'
                u'[540, 464, 580, 380] (and it was in log)': 4},
            u'commit_position': 395500,
            u'build_number': 1684,
            u'revision': u'3cf9343f4602d4ec11717cb6ff56a793c1d5f84b',
            u'url': u'https://codereview.chromium.org/1914643005',
            u'score': 4}],
       u'supported': True,
       u'last_pass': 1683}]}

  analysis1760 = WfAnalysis()
  analysis1760.result = {u'failures': [
      {u'first_failure': 1739, u'last_pass': None, u'supported': True,
       u'suspected_cls': [], u'step_name': u'browser_tests'},
      {u'first_failure': 1739, u'last_pass': None, u'supported': True,
       u'suspected_cls': [
           {u'hints': {
                u'modified dump_accessibility_browsertest_base.cc'
                u' (and it was in log)': 2},
            u'build_number': 1753,
            u'score': 2,
            u'url': u'https://codereview.chromium.org/2023453003',
            u'commit_position': 396751,
            u'revision': u'0e8dc209f5e4a6140e43551de0e036324c68a383',
            u'repo_name': u'chromium'}],
       u'step_name': u'content_browsertests'}]}

  analysis1761 = WfAnalysis()
  analysis1761.result = {u'failures': [
      {u'supported': True, u'last_pass': None, u'suspected_cls': [],
       u'step_name': u'browser_tests', u'first_failure': 1740},
      {u'supported': True, u'last_pass': None,
       u'suspected_cls': [
           {u'hints': {
                u'modified dump_accessibility_browsertest_base.cc'
                u' (and it was in log)': 2},
            u'url': u'https://codereview.chromium.org/2023453003',
            u'repo_name': u'chromium',
            u'revision': u'0e8dc209f5e4a6140e43551de0e036324c68a383',
            u'score': 2,
            u'commit_position': 396751,
            u'build_number': 1753}],
       u'step_name': u'content_browsertests', u'first_failure': 1740}]}

  analysis1762 = WfAnalysis()
  analysis1762.result = {u'failures': [
      {u'suspected_cls': [], u'first_failure': 1741,
       u'step_name': u'browser_tests', u'last_pass': None,
       u'supported': True},
      {u'suspected_cls': [
           {u'url': u'https://codereview.chromium.org/2023453003',
            u'build_number': 1753,
            u'revision': u'0e8dc209f5e4a6140e43551de0e036324c68a383',
            u'commit_position': 396751,
            u'hints': {
                u'modified dump_accessibility_browsertest_base.cc'
                u' (and it was in log)': 2},
            u'repo_name': u'chromium',
            u'score': 2},
           {u'url': u'https://codereview.chromium.org/2024053003',
            u'build_number': 1762,
            u'revision': u'292b41bbd603ae2f11d239f457a8a5f04387fa85',
            u'commit_position': 396830,
            u'hints': {
                u'modified dump_accessibility_events_browsertest.cc'
                u' (and it was in log)': 2},
            u'repo_name': u'chromium',
            u'score': 2},
           {u'url': u'https://codereview.chromium.org/2025923002',
            u'build_number': 1762,
            u'revision': u'f6c9ef029e28a6bef28e727cd70751d782963e21',
            u'commit_position': 396833,
            u'hints': {
                u'modified dump_accessibility_events_browsertest.cc[166]'
                u' (and it was in log)': 4},
            u'repo_name': u'chromium',
            u'score': 4}],
       u'first_failure': 1741,
       u'step_name': u'content_browsertests',
       u'last_pass': None,
       u'supported': True}]}

  assert not doAnalysesMatch(analysis1, analysis2)  # Different analyses
  assert not doAnalysesMatch(analysis1, analysis1)  # Zero culprit-tuples
  assert doAnalysesMatch(analysis2, analysis2)  # Has culprit-tuples
  assert doAnalysesMatch(analysis1760, analysis1761)
  assert not doAnalysesMatch(analysis1761, analysis1762)
| + |
# NOTE(review): this used to be @ndb.transactional; the decorator is off
# because the query below crosses NDB entity groups.  TODO: confirm whether
# a cross-group transaction (@ndb.transactional(xg=True)) is applicable here.
def _UpdateAnalysisResultStatus(
    master_name, builder_name, build_number, correct, user_name=None):
  """Records the triage result for an analysis and matching recent analyses.

  Looks up the analysis for the given build, finds recent untriaged analyses
  whose potential culprits match it, and applies the same triage decision
  (correct/incorrect) to all of them, appending a triage record to each.

  Args:
    master_name: name of the master of the triaged build.
    builder_name: name of the builder of the triaged build.
    build_number: number of the triaged build.
    correct: True if the analysis result was triaged as correct.
    user_name: optional name of the user performing the triage.

  Returns:
    True if the analysis was found and updated, False otherwise.
  """
  analysis = WfAnalysis.Get(master_name, builder_name, build_number)
  if not analysis or not analysis.completed:
    return False

  # Scan analyses back a fixed number of days, starting at midnight UTC.
  start_date = datetime.utcnow() - timedelta(days=MATCHING_ANALYSIS_DAYS_AGO)
  start_date = start_date.replace(hour=0, minute=0, second=0, microsecond=0)

  # Only untriaged analyses with findings need to inherit this triage
  # decision; already-triaged ones are left alone.  No ordering is needed.
  analysis_results = WfAnalysis.query(ndb.AND(
      WfAnalysis.build_start_time >= start_date,
      WfAnalysis.result_status == result_status.FOUND_UNTRIAGED)).fetch()

  # Collect analyses whose potential culprits match the triaged analysis,
  # excluding the triaged analysis itself so it is not processed twice.
  matching_analyses = [
      other for other in analysis_results
      if other.key != analysis.key and doAnalysesMatch(other, analysis)]
  matching_analyses.append(analysis)

  for matched in matching_analyses:
    if correct:
      if matched.suspected_cls:
        matched.result_status = result_status.FOUND_CORRECT
        matched.culprit_cls = matched.suspected_cls
      else:
        matched.result_status = result_status.NOT_FOUND_CORRECT
        matched.culprit_cls = None
    else:
      matched.culprit_cls = None
      if matched.suspected_cls:
        matched.result_status = result_status.FOUND_INCORRECT
      else:
        matched.result_status = result_status.NOT_FOUND_INCORRECT

    triage_record = {
        'triage_timestamp': calendar.timegm(datetime.utcnow().timetuple()),
        'user_name': user_name,
        'result_status': matched.result_status,
        'version': matched.version,
    }
    if not matched.triage_history:
      matched.triage_history = []
    matched.triage_history.append(triage_record)

    matched.put()
  return True