Chromium Code Reviews
chromiumcodereview-hr@appspot.gserviceaccount.com (chromiumcodereview-hr) | Please choose your nickname with Settings | Help | Chromium Project | Gerrit Changes | Sign out
(2396)

Unified Diff: appengine/findit/handlers/crash/test/crash_handler_test.py

Issue 2663063007: [Predator] Switch from anonymous dict to CrashData. (Closed)
Patch Set: Rebase and fix delta test. Created 3 years, 10 months ago
Use n/p to move between diff chunks; N/P to move between comments. Draft comments are only viewable by you.
Jump to:
View side-by-side diff with in-line comments
Download patch
Index: appengine/findit/handlers/crash/test/crash_handler_test.py
diff --git a/appengine/findit/handlers/crash/test/crash_handler_test.py b/appengine/findit/handlers/crash/test/crash_handler_test.py
index 3ed8e009f013a57726d2ef7b29712c55dc7b649f..a00f98f6595d570165a1c354ca236d449211cf7f 100644
--- a/appengine/findit/handlers/crash/test/crash_handler_test.py
+++ b/appengine/findit/handlers/crash/test/crash_handler_test.py
@@ -14,6 +14,7 @@ from webtest.app import AppError
from common import chrome_dependency_fetcher
from crash import crash_pipeline
+from crash.crash_pipeline import CrashWrapperPipeline
from crash.findit import Findit
from crash.findit_for_chromecrash import FinditForFracas
from crash.test.predator_testcase import PredatorTestCase
@@ -21,22 +22,8 @@ from crash.type_enums import CrashClient
from handlers.crash import crash_handler
from libs.gitiles import gitiles_repository
from model import analysis_status
+from model.crash.crash_analysis import CrashAnalysis
from model.crash.crash_config import CrashConfig
-from model.crash.fracas_crash_analysis import FracasCrashAnalysis
-
-
-MOCK_GET_REPOSITORY = lambda _: None # pragma: no cover
-
-
-class MockCulprit(object):
- """Construct a fake culprit where ``ToDicts`` returns whatever we please."""
-
- def __init__(self, mock_result, mock_tags):
- self._result = mock_result
- self._tags = mock_tags
-
- def ToDicts(self): # pragma: no cover
- return self._result, self._tags
class CrashHandlerTest(PredatorTestCase):
@@ -44,92 +31,31 @@ class CrashHandlerTest(PredatorTestCase):
('/_ah/push-handlers/crash/fracas', crash_handler.CrashHandler),
], debug=True)
- def testScheduleNewAnalysisWithFailingPolicy(self):
+ def testDoNotScheduleNewAnalysisIfNeedsNewAnalysisReturnsFalse(self):
mock_findit = self.GetMockFindit()
- self.mock(mock_findit, 'CheckPolicy', lambda *_: None)
+ self.mock(mock_findit, 'NeedsNewAnalysis', lambda _: False)
self.mock(crash_pipeline, 'FinditForClientID', lambda *_: mock_findit)
- self.assertFalse(crash_handler.ScheduleNewAnalysis(self.GetDummyCrashData(
- client_id = 'MOCK_CLIENT')))
-
- def testScheduleNewAnalysisWithPlatformRename(self):
- original_crash_data = self.GetDummyCrashData(
- client_id = 'MOCK_CLIENT',
- version = None,
- platform = 'unix',
- crash_identifiers = {})
- renamed_crash_data = copy.deepcopy(original_crash_data)
- renamed_crash_data['platform'] = 'linux'
-
- self.mock(crash_pipeline, 'FinditForClientID',
- lambda *_: self.GetMockFindit(client_id='fracas'))
- self.assertFalse(crash_handler.ScheduleNewAnalysis(original_crash_data))
-
- def testScheduleNewAnalysisSkipsUnsupportedChannel(self):
- self.assertFalse(crash_handler.ScheduleNewAnalysis(self.GetDummyCrashData(
- client_id = CrashClient.FRACAS,
- version = None,
- signature = None,
- crash_identifiers = {},
- channel = 'unsupported_channel')))
-
- def testScheduleNewAnalysisSkipsUnsupportedPlatform(self):
- self.assertFalse(crash_handler.ScheduleNewAnalysis(self.GetDummyCrashData(
- client_id = CrashClient.FRACAS,
- version = None,
- signature = None,
- platform = 'unsupported_platform',
- crash_identifiers = {})))
-
- def testScheduleNewAnalysisSkipsBlackListSignature(self):
- self.assertFalse(crash_handler.ScheduleNewAnalysis(self.GetDummyCrashData(
- client_id = CrashClient.FRACAS,
- version = None,
- signature = 'Blacklist marker signature',
- crash_identifiers = {})))
-
- def testScheduleNewAnalysisSkipsIfAlreadyCompleted(self):
- findit_client = FinditForFracas(MOCK_GET_REPOSITORY, CrashConfig.Get())
- crash_data = self.GetDummyCrashData(client_id = findit_client.client_id)
- crash_identifiers = crash_data['crash_identifiers']
- analysis = findit_client.CreateAnalysis(crash_identifiers)
- analysis.status = analysis_status.COMPLETED
- analysis.put()
- self.assertFalse(crash_handler.ScheduleNewAnalysis(crash_data))
-
- def testAnalysisScheduled(self):
- # We need to mock out the method on Findit itself (rather than using a
- # subclass), since this method only gets called on objects we
- # ourselves don't construct.
- requested_crashes = []
- def _MockScheduleNewAnalysis(crash_data):
- requested_crashes.append(crash_data)
- self.mock(crash_handler, 'ScheduleNewAnalysis', _MockScheduleNewAnalysis)
+ # Check policy failed due to empty client config.
+ self.assertFalse(crash_handler.ScheduleNewAnalysis(
+ self.GetDummyChromeCrashData()))
- self.mock_current_user(user_email='test@chromium.org', is_admin=True)
+ def testScheduleNewAnalysisIfNeedsNewAnalysisReturnsTrue(self):
+ mock_findit = self.GetMockFindit(client_id=CrashClient.FRACAS)
+ self.mock(mock_findit, 'NeedsNewAnalysis', lambda _: True)
+ self.mock(crash_pipeline, 'FinditForClientID', lambda *_: mock_findit)
+ self.assertTrue(crash_handler.ScheduleNewAnalysis(
+ self.GetDummyChromeCrashData(client_id=CrashClient.FRACAS)))
- channel = 'supported_channel'
- platform = 'supported_platform'
- signature = 'signature/here'
+ def testHandlePostScheduleNewAnalysis(self):
chrome_version = '50.2500.0.0'
- crash_data = {
- 'client_id': 'fracas',
- 'platform': platform,
- 'signature': signature,
- 'stack_trace': 'frame1\nframe2\nframe3',
- 'chrome_version': chrome_version,
- 'crash_identifiers': {
- 'chrome_version': chrome_version,
- 'signature': signature,
- 'channel': channel,
- 'platform': platform,
- 'process_type': 'renderer',
- },
- 'customized_data': {
- 'channel': channel,
- 'historical_metadata':
- [{'chrome_version': chrome_version, 'cpm': 0.6}],
- },
- }
+ signature = 'signature/here'
+ channel = 'canary'
+ platform = 'mac'
+ crash_data = self.GetDummyChromeCrashData(
+ client_id=CrashClient.FRACAS,
+ channel=channel, platform=platform,
+ signature=signature, version=chrome_version,
+ process_type='renderer')
request_json_data = {
'message': {
@@ -139,175 +65,10 @@ class CrashHandlerTest(PredatorTestCase):
'subscription': 'subscription',
}
+ self.MockPipeline(
+ CrashWrapperPipeline, True,
+ (crash_data['client_id'], crash_data['crash_identifiers']))
+ self.mock(CrashAnalysis, 'Initialize', lambda *_: None)
+
self.test_app.post_json('/_ah/push-handlers/crash/fracas',
request_json_data)
-
- self.assertEqual(1, len(requested_crashes))
- self.assertEqual(crash_data, requested_crashes[0])
-
- # TODO: this function is a gross hack. We should figure out what the
- # semantic goal really is here, so we can avoid doing such intricate
- # and fragile mocking.
- def _TestRunningAnalysisForResult(self, analysis_result, analysis_tags):
-
- # Mock out the part of PublishResultPipeline that would go over the wire.
- pubsub_publish_requests = []
- def Mocked_PublishMessagesToTopic(messages_data, topic):
- pubsub_publish_requests.append((messages_data, topic))
- self.mock(crash_pipeline.pubsub_util, 'PublishMessagesToTopic',
- Mocked_PublishMessagesToTopic)
-
- MOCK_HOST = 'host.com'
- self.mock(app_identity, 'get_default_version_hostname', lambda: MOCK_HOST)
-
- testcase = self
- MOCK_KEY = 'MOCK_KEY'
-
- # Mock out the wrapper pipeline, so we call the other pipelines directly
- # instead of doing the yielding loop and spawning off processes.
- def mock_start_pipeline(self, **kwargs):
- logging.info('Mock running on queue %s', kwargs['queue_name'])
- analysis_pipeline = crash_pipeline.CrashAnalysisPipeline(
- self._client_id, self._crash_identifiers)
- analysis_pipeline.run()
- analysis_pipeline.finalized()
-
- testcase.mock(ndb.Key, 'urlsafe', lambda _self: MOCK_KEY)
- publish_pipeline = crash_pipeline.PublishResultPipeline(
- self._client_id, self._crash_identifiers)
- publish_pipeline.run()
- publish_pipeline.finalized()
- self.mock(crash_pipeline.CrashWrapperPipeline, 'start', mock_start_pipeline)
-
- # Mock out FindCulprit to track the number of times it's called and
- # with which arguments. N.B., the pipeline will reconstruct Findit
- # objects form their client_id, so we can't mock via subclassing,
- # we must mock via ``self.mock``.
- mock_culprit = MockCulprit(analysis_result, analysis_tags)
- analyzed_crashes = []
- def _MockFindCulprit(_self, model):
- analyzed_crashes.append(model)
- return mock_culprit
- self.mock(FinditForFracas, 'FindCulprit', _MockFindCulprit)
-
- # The real ``ParseStacktrace`` calls ``GetChromeDependency``,
- # which eventually calls ``GitRepository.GetSource`` and hence
- # goes over the wire. Since we mocked out ``FindCulprit`` to no
- # longer call ``ParseStacktrace``, it shouldn't matter what the real
- # ``ParseStacktrace`` does. However, since mocking is fragile and it's
- # hard to triage what actually went wrong if we do end up going over
- # the wire, we mock this out too just to be safe.
- def _MockParseStacktrace(_self, _model):
- raise AssertionError("ParseStacktrace shouldn't ever be called. "
- 'That it was indicates some sort of problem with our mocking code.')
- self.mock(FinditForFracas, 'ParseStacktrace', _MockParseStacktrace)
-
- # More directly address the issue about ``GetChromeDependency`` going
- # over the wire.
- def _MockGetChromeDependency(_self, _revision, _platform):
- raise AssertionError("GetChromeDependency shouldn't ever be called. "
- 'That it was indicates some sort of problem with our mocking code.')
- self.mock(chrome_dependency_fetcher.ChromeDependencyFetcher,
- 'GetDependency', _MockGetChromeDependency)
-
- crash_data = self.GetDummyCrashData(
- client_id = CrashClient.FRACAS,
- version = '50.2500.0.1',
- stack_trace = 'frame1\nframe2\nframe3')
- self.assertTrue(crash_handler.ScheduleNewAnalysis(crash_data))
-
- # The catch/re-raise is to clean up the callstack that's reported
- # when things accidentally go over the wire (and subsequently fail).
- try:
- self.execute_queued_tasks()
- except AppError, e: # pragma: no cover
- raise e
-
- self.assertEqual(1, len(pubsub_publish_requests))
-
- processed_analysis_result = copy.deepcopy(analysis_result)
- processed_analysis_result['feedback_url'] = (
- 'https://%s/crash/fracas-result-feedback?key=%s' % (MOCK_HOST,
- MOCK_KEY))
-
- for cl in processed_analysis_result.get('suspected_cls', []):
- cl['confidence'] = round(cl['confidence'], 2)
- cl.pop('reasons', None)
-
- expected_messages_data = [json.dumps({
- 'crash_identifiers': crash_data['crash_identifiers'],
- 'client_id': CrashClient.FRACAS,
- 'result': processed_analysis_result,
- }, sort_keys=True)]
- self.assertListEqual(expected_messages_data, pubsub_publish_requests[0][0])
- self.assertEqual(1, len(analyzed_crashes))
- analysis = analyzed_crashes[0]
- self.assertTrue(isinstance(analysis, FracasCrashAnalysis))
- self.assertEqual(crash_data['signature'], analysis.signature)
- self.assertEqual(crash_data['platform'], analysis.platform)
- self.assertEqual(crash_data['stack_trace'], analysis.stack_trace)
- self.assertEqual(crash_data['chrome_version'], analysis.crashed_version)
- self.assertEqual(crash_data['regression_range'], analysis.regression_range)
-
- analysis = FracasCrashAnalysis.Get(crash_data['crash_identifiers'])
- self.assertEqual(analysis_result, analysis.result)
- return analysis
-
- def testRunningAnalysis(self):
- analysis_result = {
- 'found': True,
- 'suspected_cls': [],
- 'other_data': 'data',
- }
- analysis_tags = {
- 'found_suspects': True,
- 'has_regression_range': True,
- 'solution': 'core',
- 'unsupported_tag': '',
- }
-
- analysis = self._TestRunningAnalysisForResult(
- analysis_result, analysis_tags)
- self.assertTrue(analysis.has_regression_range)
- self.assertTrue(analysis.found_suspects)
- self.assertEqual('core', analysis.solution)
-
- def testRunningAnalysisNoSuspectsFound(self):
- analysis_result = {
- 'found': False
- }
- analysis_tags = {
- 'found_suspects': False,
- 'has_regression_range': False,
- 'solution': 'core',
- 'unsupported_tag': '',
- }
-
- analysis = self._TestRunningAnalysisForResult(
- analysis_result, analysis_tags)
- self.assertFalse(analysis.has_regression_range)
- self.assertFalse(analysis.found_suspects)
- self.assertEqual('core', analysis.solution)
-
- def testRunningAnalysisWithSuspectsCls(self):
- analysis_result = {
- 'found': True,
- 'suspected_cls': [
- {'confidence': 0.21434,
- 'reasons': ['reason1', 'reason2'],
- 'other': 'data'}
- ],
- 'other_data': 'data',
- }
- analysis_tags = {
- 'found_suspects': True,
- 'has_regression_range': True,
- 'solution': 'core',
- 'unsupported_tag': '',
- }
-
- analysis = self._TestRunningAnalysisForResult(
- analysis_result, analysis_tags)
- self.assertTrue(analysis.has_regression_range)
- self.assertTrue(analysis.found_suspects)
- self.assertEqual('core', analysis.solution)
« no previous file with comments | « appengine/findit/handlers/crash/crash_handler.py ('k') | appengine/findit/handlers/crash/test/fracas_dashboard_test.py » ('j') | no next file with comments »

Powered by Google App Engine
This is Rietveld 408576698