Chromium Code Reviews| Index: appengine/findit/crash/test/crash_pipeline_test.py |
| diff --git a/appengine/findit/crash/test/crash_pipeline_test.py b/appengine/findit/crash/test/crash_pipeline_test.py |
| index 634fb7972de839340f775a5dbce4114832189fdd..b9c3ed395bb66b3b195205c34fc9d4db4d58eb5a 100644 |
| --- a/appengine/findit/crash/test/crash_pipeline_test.py |
| +++ b/appengine/findit/crash/test/crash_pipeline_test.py |
| @@ -3,169 +3,169 @@ |
| # found in the LICENSE file. |
| import copy |
| import json |
| +import logging |
| from google.appengine.api import app_identity |
| +from webtest.app import AppError |
| +from common import chromium_deps |
| from common.pipeline_wrapper import pipeline_handlers |
| from crash import crash_pipeline |
| -from crash import findit_for_chromecrash |
| +from crash.culprit import Culprit |
| +from crash.findit_for_chromecrash import FinditForFracas |
| +from crash.type_enums import CrashClient |
| from crash.test.crash_testcase import CrashTestCase |
| from model import analysis_status |
| from model.crash.fracas_crash_analysis import FracasCrashAnalysis |
| -class CrashPipelineTest(CrashTestCase): |
| - app_module = pipeline_handlers._APP |
| - |
| - def testNoAnalysisIfLastOneIsNotFailed(self): |
| - chrome_version = '1' |
| - signature = 'signature' |
| - platform = 'win' |
| +def DummyCrashData( |
| + version='1', |
| + signature='signature', |
| + platform='win', |
| + stack_trace=None, |
| + regression_range=None, |
| + channel='canary', |
| + historical_metadata=None, |
| + crash_identifiers=True, |
| + process_type='browser'): |
| + if crash_identifiers is True: |
| crash_identifiers = { |
| - 'chrome_version': chrome_version, |
| - 'signature': signature, |
| - 'channel': 'canary', |
| - 'platform': platform, |
| - 'process_type': 'browser', |
| + 'chrome_version': version, |
| + 'signature': signature, |
| + 'channel': channel, |
| + 'platform': platform, |
| + 'process_type': process_type, |
| } |
| - for status in (analysis_status.PENDING, analysis_status.RUNNING, |
| - analysis_status.COMPLETED, analysis_status.SKIPPED): |
| - analysis = FracasCrashAnalysis.Create(crash_identifiers) |
| - analysis.status = status |
| - analysis.put() |
| - self.assertFalse(crash_pipeline._NeedsNewAnalysis( |
| - crash_identifiers, chrome_version, signature, 'fracas', |
| - platform, None, {'channel': 'canary'})) |
| - |
| - def testAnalysisNeededIfLastOneFailed(self): |
| - chrome_version = '1' |
| - signature = 'signature' |
| - platform = 'win' |
| - crash_identifiers = { |
| - 'chrome_version': chrome_version, |
| + return { |
| + 'crashed_version': version, |
| 'signature': signature, |
| - 'channel': 'canary', |
| 'platform': platform, |
| - 'process_type': 'browser', |
| - } |
| - analysis = FracasCrashAnalysis.Create(crash_identifiers) |
| - analysis.status = analysis_status.ERROR |
| - analysis.put() |
| - self.assertTrue(crash_pipeline._NeedsNewAnalysis( |
| - crash_identifiers, chrome_version, signature, 'fracas', |
| - platform, None, {'channel': 'canary'})) |
| - |
| - def testAnalysisNeededIfNoAnalysisYet(self): |
| - chrome_version = '1' |
| - signature = 'signature' |
| - platform = 'win' |
| - crash_identifiers = { |
| - 'chrome_version': chrome_version, |
| - 'signature': signature, |
| - 'channel': 'canary', |
| - 'platform': platform, |
| - 'process_type': 'browser', |
| - } |
| - self.assertTrue(crash_pipeline._NeedsNewAnalysis( |
| - crash_identifiers, chrome_version, signature, 'fracas', |
| - platform, None, {'channel': 'canary'})) |
| - |
| - def testUnsupportedChannelOrPlatformSkipped(self): |
| - self.assertFalse( |
| - crash_pipeline.ScheduleNewAnalysisForCrash( |
| - {}, None, None, 'fracas', 'win', |
| - None, {'channel': 'unsupported_channel', |
| - 'historical_metadata': None})) |
| - self.assertFalse( |
| - crash_pipeline.ScheduleNewAnalysisForCrash( |
| - {}, None, None, 'fracas', 'unsupported_platform', |
| - None, {'channel': 'unsupported_channel', |
| - 'historical_metadata': None})) |
| - |
| - def testBlackListSignatureSipped(self): |
| - self.assertFalse( |
| - crash_pipeline.ScheduleNewAnalysisForCrash( |
| - {}, None, 'Blacklist marker signature', 'fracas', 'win', |
| - None, {'channel': 'canary', |
| - 'historical_metadata': None})) |
| - |
| - def testPlatformRename(self): |
| - def _MockNeedsNewAnalysis(*args): |
| - self.assertEqual(args, |
| - ({}, None, 'signature', 'fracas', 'unix', None, |
| - {'channel': 'canary'})) |
| - return False |
| - |
| - self.mock(crash_pipeline, '_NeedsNewAnalysis', _MockNeedsNewAnalysis) |
| - |
| - crash_pipeline.ScheduleNewAnalysisForCrash( |
| - {}, None, 'signature', 'fracas', 'linux', |
| - None, {'channel': 'canary'}) |
| - |
| - def testNoAnalysisNeeded(self): |
| - chrome_version = '1' |
| - signature = 'signature' |
| - platform = 'win' |
| - channel = 'canary' |
| - crash_identifiers = { |
| - 'chrome_version': chrome_version, |
| - 'signature': signature, |
| - 'channel': channel, |
| - 'platform': platform, |
| - 'process_type': 'browser', |
| - } |
| + 'stack_trace': stack_trace, |
| + 'regression_range': regression_range, |
| + 'crash_identifiers': crash_identifiers, |
| + 'customized_data': { |
| + 'historical_metadata': historical_metadata, |
| + 'channel': channel, |
| + }, |
| + } |
| + |
| + |
| +class MockCulprit(object): |
| + """Construct a fake culprit where |ToDicts| returns whatever we please.""" |
| + |
| + def __init__(self, mock_result, mock_tags): |
| + self._result = mock_result |
| + self._tags = mock_tags |
| + |
| + def ToDicts(self): # pragma: no cover |
| + return self._result, self._tags |
| + |
| + |
| +class CrashPipelineTest(CrashTestCase): |
| + app_module = pipeline_handlers._APP |
| + |
| + def testAnalysisAborted(self): |
| + crash_identifiers = DummyCrashData()['crash_identifiers'] |
| analysis = FracasCrashAnalysis.Create(crash_identifiers) |
| - analysis.status = analysis_status.COMPLETED |
| + analysis.status = analysis_status.RUNNING |
| analysis.put() |
| - self.assertFalse( |
| - crash_pipeline.ScheduleNewAnalysisForCrash( |
| - crash_identifiers, chrome_version, signature, 'fracas', |
| - platform, None, {'channel': channel, |
| - 'historical_metadata': None})) |
| + pipeline = crash_pipeline.CrashAnalysisPipeline( |
| + CrashClient.FRACAS, |
| + crash_identifiers) |
| + pipeline._PutAbortedError() |
| + analysis = FracasCrashAnalysis.Get(crash_identifiers) |
| + self.assertEqual(analysis_status.ERROR, analysis.status) |
| + |
| + # TODO: this function is a gross hack. We should figure out what the |
| + # semantic goal really is here, so we can avoid doing such intricate |
| + # and fragile mocking. |
| def _TestRunningAnalysisForResult(self, analysis_result, analysis_tags): |
| + |
| + # Mock out the part of PublishResultPipeline that would go over the wire. |
| pubsub_publish_requests = [] |
| def Mocked_PublishMessagesToTopic(messages_data, topic): |
| pubsub_publish_requests.append((messages_data, topic)) |
| self.mock(crash_pipeline.pubsub_util, 'PublishMessagesToTopic', |
| Mocked_PublishMessagesToTopic) |
| - analyzed_crashes = [] |
| - class Mocked_FinditForChromeCrash(object): |
| - def __init__(self, *_): |
| - pass |
| - def FindCulprit(self, *args): |
| - analyzed_crashes.append(args) |
| - return analysis_result, analysis_tags |
| - self.mock(findit_for_chromecrash, 'FinditForChromeCrash', |
| - Mocked_FinditForChromeCrash) |
| - |
| - chrome_version = '1' |
| - signature = 'signature' |
| - platform = 'win' |
| - channel = 'canary' |
| - crash_identifiers = { |
| - 'chrome_version': chrome_version, |
| - 'signature': signature, |
| - 'channel': channel, |
| - 'platform': platform, |
| - 'process_type': 'browser', |
| - } |
| - stack_trace = 'frame1\nframe2\nframe3' |
| - chrome_version = '50.2500.0.1' |
| - historical_metadata = None |
| - |
| mock_host = 'https://host.com' |
| self.mock(app_identity, 'get_default_version_hostname', lambda: mock_host) |
| - self.assertTrue( |
| - crash_pipeline.ScheduleNewAnalysisForCrash( |
| - crash_identifiers, chrome_version, signature, 'fracas', |
| - platform, stack_trace, |
| - {'channel': channel, 'historical_metadata': historical_metadata})) |
| - |
| - self.execute_queued_tasks() |
| + # TODO: We need to mock out the pipeline so that it doesn't go over |
| + # the wire, and yet still exercises the code we're trying to unittest. |
| + # TODO: since |FinditForClientID| automatically feeds |
| + # CrashWrapperPipeline in to the Findit constructor; this mock |
| + # probably won't work. |
| + class _MockPipeline(crash_pipeline.CrashWrapperPipeline): |
| + def start(self, **kwargs): |
| + logging.info('Mock running on queue %s', kwargs['queue_name']) |
| + # TODO: the code below helps to improve the code coverage |
| + # percentage, but it leads to even more verbose failures for |
| + # everything that calls _TestRunningAnalysisForResult. If we're |
| + # mocking things this way, we should figure out how to do the |
| + # callback loop with what |CrashWrapperPipeline.run| yields. |
| + #analysis_pipeline = crash_pipeline.CrashAnalysisPipeline( |
| + # self._client_id, self._crash_identifiers) |
| + #analysis_pipeline.run() |
| + #analysis_pipeline.finalized() |
| + #publish_pipeline = crash_pipeline.PublishResultPipeline( |
| + # self._client_id, self._crash_identifiers) |
| + #publish_pipeline.run() |
| + #publish_pipeline.finalized() |
| + raise NotImplementedError( |
| + 'how can we mock CrashWrapperPipeline correctly?') |
| + |
| + # Mock out FindCulprit to track the number of times it's called and |
| + # with which arguments. N.B., the pipeline will reconstruct Findit |
| + # objects from their client_id, so we can't mock via subclassing, |
| + # we must mock via |self.mock|. |
| + mock_culprit = MockCulprit(analysis_result, analysis_tags) |
| + analyzed_crashes = [] |
| + def _MockFindCulprit(_self, *args): |
| + analyzed_crashes.append(args) |
| + return mock_culprit |
| + self.mock(FinditForFracas, 'FindCulprit', _MockFindCulprit) |
| + |
| + # The real |ParseStacktrace| calls |GetChromeDependency|, which |
| + # eventually calls |GitRepository.GetSource| and hence goes over |
| + # the wire. Since we mocked out |FindCulprit| to no longer call |
| + # |ParseStacktrace|, it shouldn't matter what the real |
| + # |ParseStacktrace| does. However, since mocking is fragile and it's |
| + # hard to triage what actually went wrong if we do end up going over |
| + # the wire, we mock this out too just to be safe. |
| + def _MockParseStacktrace(_self, _model): |
| + raise AssertionError("ParseStacktrace shouldn't ever be called. " |
| + 'That it was indicates some sort of problem with our mocking code.') |
| + self.mock(FinditForFracas, 'ParseStacktrace', _MockParseStacktrace) |
| + |
| + # More directly address the issue about |GetChromeDependency| going |
| + # over the wire. |
| + def _MockGetChromeDependency(_self, _model): |
| + raise AssertionError("GetChromeDependency shouldn't ever be called. " |
| + 'That it was indicates some sort of problem with our mocking code.') |
| + self.mock(chromium_deps, 'GetChromeDependency', _MockGetChromeDependency) |
| + |
| + crash_data = DummyCrashData( |
| + version = '50.2500.0.1', |
| + stack_trace = 'frame1\nframe2\nframe3') |
| + # This call to ScheduleNewAnalysis is the one that was going over |
| + # the wire previously. We don't want to go over the wire, so we need |
| + # to make sure all the bits and pieces are mocked appropriately. |
| + # |
| + # TODO(wrengr): and in the latest incarnation of our problems here, |
| + # suddenly the call to |self.config| nested deep inside here is |
| + # returning None. |
| + self.assertTrue(FinditForFracas(_MockPipeline).ScheduleNewAnalysis( |
| + crash_data)) |
| + |
| + # We catch and re-raise to clean up the callstack that's reported. |
| + try: |
| + self.execute_queued_tasks() |
| + except AppError, e: |
| + raise e |
| self.assertEqual(1, len(pubsub_publish_requests)) |
| @@ -175,28 +175,30 @@ class CrashPipelineTest(CrashTestCase): |
| 'key=agx0ZXN0YmVkLXRlc3RyQQsSE0ZyYWNhc0NyYXNoQW5hbHlzaXMiKGU2ZWIyNj' |
| 'A2OTBlYTAyMjVjNWNjYTM3ZTNjYTlmYWExOGVmYjVlM2UM') |
| - if 'suspected_cls' in processed_analysis_result: |
| - for cl in processed_analysis_result['suspected_cls']: |
| - cl['confidence'] = round(cl['confidence'], 2) |
| - cl.pop('reason', None) |
| + for cl in processed_analysis_result.get('suspected_cls', []): |
| + cl['confidence'] = round(cl['confidence'], 2) |
| + cl.pop('reason', None) |
| expected_messages_data = [json.dumps({ |
| - 'crash_identifiers': crash_identifiers, |
| - 'client_id': 'fracas', |
| + 'crash_identifiers': crash_data['crash_identifiers'], |
| + 'client_id': CrashClient.FRACAS, |
| 'result': processed_analysis_result, |
| }, sort_keys=True)] |
| self.assertEqual(expected_messages_data, pubsub_publish_requests[0][0]) |
| self.assertEqual(1, len(analyzed_crashes)) |
| self.assertEqual( |
| - (signature, platform, stack_trace, chrome_version, None), |
| + (crash_data['signature'], crash_data['platform'], |
| + crash_data['stack_trace'], crash_data['chrome_version'], None), |
| analyzed_crashes[0]) |
| - analysis = FracasCrashAnalysis.Get(crash_identifiers) |
| + analysis = FracasCrashAnalysis.Get(crash_data['crash_identifiers']) |
| self.assertEqual(analysis_result, analysis.result) |
| return analysis |
|
stgao
2016/10/25 18:03:41
nit: too many empty lines.
wrengr
2016/10/25 19:49:54
Acknowledged.
|
| + |
| + |
| def testRunningAnalysis(self): |
| analysis_result = { |
| 'found': True, |
| @@ -255,23 +257,3 @@ class CrashPipelineTest(CrashTestCase): |
| self.assertTrue(analysis.has_regression_range) |
| self.assertTrue(analysis.found_suspects) |
| self.assertEqual('core', analysis.solution) |
| - |
| - def testAnalysisAborted(self): |
| - chrome_version = '1' |
| - signature = 'signature' |
| - platform = 'win' |
| - crash_identifiers = { |
| - 'chrome_version': chrome_version, |
| - 'signature': signature, |
| - 'channel': 'canary', |
| - 'platform': platform, |
| - 'process_type': 'browser', |
| - } |
| - analysis = FracasCrashAnalysis.Create(crash_identifiers) |
| - analysis.status = analysis_status.RUNNING |
| - analysis.put() |
| - |
| - pipeline = crash_pipeline.CrashAnalysisPipeline(crash_identifiers, 'fracas') |
| - pipeline._SetErrorIfAborted(True) |
| - analysis = FracasCrashAnalysis.Get(crash_identifiers) |
| - self.assertEqual(analysis_status.ERROR, analysis.status) |