| OLD | NEW |
| 1 # Copyright 2016 The Chromium Authors. All rights reserved. | 1 # Copyright 2016 The Chromium Authors. All rights reserved. |
| 2 # Use of this source code is governed by a BSD-style license that can be | 2 # Use of this source code is governed by a BSD-style license that can be |
| 3 # found in the LICENSE file. | 3 # found in the LICENSE file. |
| 4 import copy | 4 import copy |
| 5 import json | 5 import json |
| 6 import logging |
| 6 | 7 |
| 7 from google.appengine.api import app_identity | 8 from google.appengine.api import app_identity |
| 9 from google.appengine.ext import ndb |
| 10 from webtest.app import AppError |
| 8 | 11 |
| 12 from common import chrome_dependency_fetcher |
| 9 from common.pipeline_wrapper import pipeline_handlers | 13 from common.pipeline_wrapper import pipeline_handlers |
| 10 from crash import crash_pipeline | 14 from crash import crash_pipeline |
| 11 from crash import findit_for_chromecrash | 15 from crash.culprit import Culprit |
| 16 from crash.findit_for_chromecrash import FinditForFracas |
| 17 from crash.type_enums import CrashClient |
| 12 from crash.test.crash_testcase import CrashTestCase | 18 from crash.test.crash_testcase import CrashTestCase |
| 13 from model import analysis_status | 19 from model import analysis_status |
| 14 from model.crash.fracas_crash_analysis import FracasCrashAnalysis | 20 from model.crash.fracas_crash_analysis import FracasCrashAnalysis |
| 15 | 21 |
| 22 def DummyCrashData( |
| 23 version='1', |
| 24 signature='signature', |
| 25 platform='win', |
| 26 stack_trace=None, |
| 27 regression_range=None, |
| 28 channel='canary', |
| 29 historical_metadata=None, |
| 30 crash_identifiers=True, |
| 31 process_type='browser'): |
| 32 if crash_identifiers is True: # pragma: no cover |
| 33 crash_identifiers = { |
| 34 'chrome_version': version, |
| 35 'signature': signature, |
| 36 'channel': channel, |
| 37 'platform': platform, |
| 38 'process_type': process_type, |
| 39 } |
| 40 return { |
| 41 'crashed_version': version, |
| 42 'signature': signature, |
| 43 'platform': platform, |
| 44 'stack_trace': stack_trace, |
| 45 'regression_range': regression_range, |
| 46 'crash_identifiers': crash_identifiers, |
| 47 'customized_data': { |
| 48 'historical_metadata': historical_metadata, |
| 49 'channel': channel, |
| 50 }, |
| 51 } |
| 52 |
| 53 |
| 54 class MockCulprit(object): |
| 55 """Construct a fake culprit where |ToDicts| returns whatever we please.""" |
| 56 |
| 57 def __init__(self, mock_result, mock_tags): |
| 58 self._result = mock_result |
| 59 self._tags = mock_tags |
| 60 |
| 61 def ToDicts(self): # pragma: no cover |
| 62 return self._result, self._tags |
| 63 |
| 16 | 64 |
| 17 class CrashPipelineTest(CrashTestCase): | 65 class CrashPipelineTest(CrashTestCase): |
| 18 app_module = pipeline_handlers._APP | 66 app_module = pipeline_handlers._APP |
| 19 | 67 |
| 20 def testNoAnalysisIfLastOneIsNotFailed(self): | 68 def testAnalysisAborted(self): |
| 21 chrome_version = '1' | 69 crash_identifiers = DummyCrashData()['crash_identifiers'] |
| 22 signature = 'signature' | |
| 23 platform = 'win' | |
| 24 crash_identifiers = { | |
| 25 'chrome_version': chrome_version, | |
| 26 'signature': signature, | |
| 27 'channel': 'canary', | |
| 28 'platform': platform, | |
| 29 'process_type': 'browser', | |
| 30 } | |
| 31 for status in (analysis_status.PENDING, analysis_status.RUNNING, | |
| 32 analysis_status.COMPLETED, analysis_status.SKIPPED): | |
| 33 analysis = FracasCrashAnalysis.Create(crash_identifiers) | |
| 34 analysis.status = status | |
| 35 analysis.put() | |
| 36 self.assertFalse(crash_pipeline._NeedsNewAnalysis( | |
| 37 crash_identifiers, chrome_version, signature, 'fracas', | |
| 38 platform, None, {'channel': 'canary'})) | |
| 39 | |
| 40 def testAnalysisNeededIfLastOneFailed(self): | |
| 41 chrome_version = '1' | |
| 42 signature = 'signature' | |
| 43 platform = 'win' | |
| 44 crash_identifiers = { | |
| 45 'chrome_version': chrome_version, | |
| 46 'signature': signature, | |
| 47 'channel': 'canary', | |
| 48 'platform': platform, | |
| 49 'process_type': 'browser', | |
| 50 } | |
| 51 analysis = FracasCrashAnalysis.Create(crash_identifiers) | 70 analysis = FracasCrashAnalysis.Create(crash_identifiers) |
| 52 analysis.status = analysis_status.ERROR | 71 analysis.status = analysis_status.RUNNING |
| 53 analysis.put() | |
| 54 self.assertTrue(crash_pipeline._NeedsNewAnalysis( | |
| 55 crash_identifiers, chrome_version, signature, 'fracas', | |
| 56 platform, None, {'channel': 'canary'})) | |
| 57 | |
| 58 def testAnalysisNeededIfNoAnalysisYet(self): | |
| 59 chrome_version = '1' | |
| 60 signature = 'signature' | |
| 61 platform = 'win' | |
| 62 crash_identifiers = { | |
| 63 'chrome_version': chrome_version, | |
| 64 'signature': signature, | |
| 65 'channel': 'canary', | |
| 66 'platform': platform, | |
| 67 'process_type': 'browser', | |
| 68 } | |
| 69 self.assertTrue(crash_pipeline._NeedsNewAnalysis( | |
| 70 crash_identifiers, chrome_version, signature, 'fracas', | |
| 71 platform, None, {'channel': 'canary'})) | |
| 72 | |
| 73 def testUnsupportedChannelOrPlatformSkipped(self): | |
| 74 self.assertFalse( | |
| 75 crash_pipeline.ScheduleNewAnalysisForCrash( | |
| 76 {}, None, None, 'fracas', 'win', | |
| 77 None, {'channel': 'unsupported_channel', | |
| 78 'historical_metadata': None})) | |
| 79 self.assertFalse( | |
| 80 crash_pipeline.ScheduleNewAnalysisForCrash( | |
| 81 {}, None, None, 'fracas', 'unsupported_platform', | |
| 82 None, {'channel': 'unsupported_channel', | |
| 83 'historical_metadata': None})) | |
| 84 | |
| 85 def testBlackListSignatureSipped(self): | |
| 86 self.assertFalse( | |
| 87 crash_pipeline.ScheduleNewAnalysisForCrash( | |
| 88 {}, None, 'Blacklist marker signature', 'fracas', 'win', | |
| 89 None, {'channel': 'canary', | |
| 90 'historical_metadata': None})) | |
| 91 | |
| 92 def testPlatformRename(self): | |
| 93 def _MockNeedsNewAnalysis(*args): | |
| 94 self.assertEqual(args, | |
| 95 ({}, None, 'signature', 'fracas', 'unix', None, | |
| 96 {'channel': 'canary'})) | |
| 97 return False | |
| 98 | |
| 99 self.mock(crash_pipeline, '_NeedsNewAnalysis', _MockNeedsNewAnalysis) | |
| 100 | |
| 101 crash_pipeline.ScheduleNewAnalysisForCrash( | |
| 102 {}, None, 'signature', 'fracas', 'linux', | |
| 103 None, {'channel': 'canary'}) | |
| 104 | |
| 105 def testNoAnalysisNeeded(self): | |
| 106 chrome_version = '1' | |
| 107 signature = 'signature' | |
| 108 platform = 'win' | |
| 109 channel = 'canary' | |
| 110 crash_identifiers = { | |
| 111 'chrome_version': chrome_version, | |
| 112 'signature': signature, | |
| 113 'channel': channel, | |
| 114 'platform': platform, | |
| 115 'process_type': 'browser', | |
| 116 } | |
| 117 analysis = FracasCrashAnalysis.Create(crash_identifiers) | |
| 118 analysis.status = analysis_status.COMPLETED | |
| 119 analysis.put() | 72 analysis.put() |
| 120 | 73 |
| 121 self.assertFalse( | 74 pipeline = crash_pipeline.CrashAnalysisPipeline( |
| 122 crash_pipeline.ScheduleNewAnalysisForCrash( | 75 CrashClient.FRACAS, |
| 123 crash_identifiers, chrome_version, signature, 'fracas', | 76 crash_identifiers) |
| 124 platform, None, {'channel': channel, | 77 pipeline._PutAbortedError() |
| 125 'historical_metadata': None})) | 78 analysis = FracasCrashAnalysis.Get(crash_identifiers) |
| 79 self.assertEqual(analysis_status.ERROR, analysis.status) |
| 126 | 80 |
| 81 |
| 82 # TODO: this function is a gross hack. We should figure out what the |
| 83 # semantic goal really is here, so we can avoid doing such intricate |
| 84 # and fragile mocking. |
| 127 def _TestRunningAnalysisForResult(self, analysis_result, analysis_tags): | 85 def _TestRunningAnalysisForResult(self, analysis_result, analysis_tags): |
| 86 |
| 87 # Mock out the part of PublishResultPipeline that would go over the wire. |
| 128 pubsub_publish_requests = [] | 88 pubsub_publish_requests = [] |
| 129 def Mocked_PublishMessagesToTopic(messages_data, topic): | 89 def Mocked_PublishMessagesToTopic(messages_data, topic): |
| 130 pubsub_publish_requests.append((messages_data, topic)) | 90 pubsub_publish_requests.append((messages_data, topic)) |
| 131 self.mock(crash_pipeline.pubsub_util, 'PublishMessagesToTopic', | 91 self.mock(crash_pipeline.pubsub_util, 'PublishMessagesToTopic', |
| 132 Mocked_PublishMessagesToTopic) | 92 Mocked_PublishMessagesToTopic) |
| 133 | 93 |
| 94 MOCK_HOST = 'https://host.com' |
| 95 self.mock(app_identity, 'get_default_version_hostname', lambda: MOCK_HOST) |
| 96 |
| 97 testcase = self |
| 98 MOCK_KEY = 'MOCK_KEY' |
| 99 |
| 100 # TODO: We need to mock out the pipeline so that it doesn't go over |
| 101 # the wire, and yet still exercises the code we're trying to unittest. |
| 102 # TODO: since |FinditForClientID| automatically feeds |
| 103 # CrashWrapperPipeline in to the Findit constructor; this mock |
| 104 # probably won't work. |
| 105 class _MockPipeline(crash_pipeline.CrashWrapperPipeline): |
| 106 def start(self, **kwargs): |
| 107 logging.info('Mock running on queue %s', kwargs['queue_name']) |
| 108 analysis_pipeline = crash_pipeline.CrashAnalysisPipeline( |
| 109 self._client_id, self._crash_identifiers) |
| 110 analysis_pipeline.run() |
| 111 analysis_pipeline.finalized() |
| 112 |
| 113 testcase.mock(ndb.Key, 'urlsafe', lambda _self: MOCK_KEY) |
| 114 publish_pipeline = crash_pipeline.PublishResultPipeline( |
| 115 self._client_id, self._crash_identifiers) |
| 116 publish_pipeline.run() |
| 117 publish_pipeline.finalized() |
| 118 |
| 119 # Mock out FindCulprit to track the number of times it's called and |
| 120 # with which arguments. N.B., the pipeline will reconstruct Findit |
| 121 # objects from their client_id, so we can't mock via subclassing, |
| 122 # we must mock via |self.mock|. |
| 123 mock_culprit = MockCulprit(analysis_result, analysis_tags) |
| 134 analyzed_crashes = [] | 124 analyzed_crashes = [] |
| 135 class Mocked_FinditForChromeCrash(object): | 125 def _MockFindCulprit(_self, model): |
| 136 def __init__(self, *_): | 126 analyzed_crashes.append(model) |
| 137 pass | 127 return mock_culprit |
| 138 def FindCulprit(self, *args): | 128 self.mock(FinditForFracas, 'FindCulprit', _MockFindCulprit) |
| 139 analyzed_crashes.append(args) | |
| 140 return analysis_result, analysis_tags | |
| 141 self.mock(findit_for_chromecrash, 'FinditForChromeCrash', | |
| 142 Mocked_FinditForChromeCrash) | |
| 143 | 129 |
| 144 chrome_version = '1' | 130 # The real |ParseStacktrace| calls |GetChromeDependency|, which |
| 145 signature = 'signature' | 131 # eventually calls |GitRepository.GetSource| and hence goes over |
| 146 platform = 'win' | 132 # the wire. Since we mocked out |FindCulprit| to no longer call |
| 147 channel = 'canary' | 133 # |ParseStacktrace|, it shouldn't matter what the real |
| 148 crash_identifiers = { | 134 # |ParseStacktrace| does. However, since mocking is fragile and it's |
| 149 'chrome_version': chrome_version, | 135 # hard to triage what actually went wrong if we do end up going over |
| 150 'signature': signature, | 136 # the wire, we mock this out too just to be safe. |
| 151 'channel': channel, | 137 def _MockParseStacktrace(_self, _model): |
| 152 'platform': platform, | 138 raise AssertionError("ParseStacktrace shouldn't ever be called. " |
| 153 'process_type': 'browser', | 139 'That it was indicates some sort of problem with our mocking code.') |
| 154 } | 140 self.mock(FinditForFracas, 'ParseStacktrace', _MockParseStacktrace) |
| 155 stack_trace = 'frame1\nframe2\nframe3' | |
| 156 chrome_version = '50.2500.0.1' | |
| 157 historical_metadata = None | |
| 158 | 141 |
| 159 mock_host = 'https://host.com' | 142 # More directly address the issue about |GetChromeDependency| going |
| 160 self.mock(app_identity, 'get_default_version_hostname', lambda: mock_host) | 143 # over the wire. |
| 144 def _MockGetChromeDependency(_self, _revision, _platform): |
| 145 raise AssertionError("GetChromeDependency shouldn't ever be called. " |
| 146 'That it was indicates some sort of problem with our mocking code.') |
| 147 self.mock(chrome_dependency_fetcher.ChromeDependencyFetcher, |
| 148 'GetDependency', _MockGetChromeDependency) |
| 161 | 149 |
| 150 crash_data = DummyCrashData( |
| 151 version = '50.2500.0.1', |
| 152 stack_trace = 'frame1\nframe2\nframe3') |
| 153 # A fake repository, needed by the Findit constructor. We should never |
| 154 # go over the wire (e.g., in the call to ScheduleNewAnalysis below), |
| 155 # and this helps ensure that. (The current version of the tests |
| 156 # don't seem to need the repo at all, so None is a sufficient mock |
| 157 # for now.) |
| 158 mock_repository = None |
| 162 self.assertTrue( | 159 self.assertTrue( |
| 163 crash_pipeline.ScheduleNewAnalysisForCrash( | 160 FinditForFracas(mock_repository, _MockPipeline).ScheduleNewAnalysis( |
| 164 crash_identifiers, chrome_version, signature, 'fracas', | 161 crash_data)) |
| 165 platform, stack_trace, | |
| 166 {'channel': channel, 'historical_metadata': historical_metadata})) | |
| 167 | 162 |
| 168 self.execute_queued_tasks() | 163 # The catch/re-raise is to clean up the callstack that's reported |
| 164 # when things accidentally go over the wire (and subsequently fail). |
| 165 try: |
| 166 self.execute_queued_tasks() |
| 167 except AppError, e: # pragma: no cover |
| 168 raise e |
| 169 | 169 |
| 170 self.assertEqual(1, len(pubsub_publish_requests)) | 170 self.assertEqual(1, len(pubsub_publish_requests)) |
| 171 | 171 |
| 172 processed_analysis_result = copy.deepcopy(analysis_result) | 172 processed_analysis_result = copy.deepcopy(analysis_result) |
| 173 processed_analysis_result['feedback_url'] = ( | 173 processed_analysis_result['feedback_url'] = ( |
| 174 mock_host + '/crash/fracas-result-feedback?' | 174 '%s/crash/fracas-result-feedback?key=%s' % (MOCK_HOST, MOCK_KEY)) |
| 175 'key=agx0ZXN0YmVkLXRlc3RyQQsSE0ZyYWNhc0NyYXNoQW5hbHlzaXMiKGU2ZWIyNj' | |
| 176 'A2OTBlYTAyMjVjNWNjYTM3ZTNjYTlmYWExOGVmYjVlM2UM') | |
| 177 | 175 |
| 178 if 'suspected_cls' in processed_analysis_result: | 176 for cl in processed_analysis_result.get('suspected_cls', []): |
| 179 for cl in processed_analysis_result['suspected_cls']: | 177 cl['confidence'] = round(cl['confidence'], 2) |
| 180 cl['confidence'] = round(cl['confidence'], 2) | 178 cl.pop('reason', None) |
| 181 cl.pop('reason', None) | |
| 182 | 179 |
| 183 expected_messages_data = [json.dumps({ | 180 expected_messages_data = [json.dumps({ |
| 184 'crash_identifiers': crash_identifiers, | 181 'crash_identifiers': crash_data['crash_identifiers'], |
| 185 'client_id': 'fracas', | 182 'client_id': CrashClient.FRACAS, |
| 186 'result': processed_analysis_result, | 183 'result': processed_analysis_result, |
| 187 }, sort_keys=True)] | 184 }, sort_keys=True)] |
| 188 self.assertEqual(expected_messages_data, pubsub_publish_requests[0][0]) | 185 self.assertListEqual(expected_messages_data, pubsub_publish_requests[0][0]) |
| 186 self.assertEqual(1, len(analyzed_crashes)) |
| 187 analysis = analyzed_crashes[0] |
| 188 self.assertTrue(isinstance(analysis, FracasCrashAnalysis)) |
| 189 self.assertEqual(crash_data['signature'], analysis.signature) |
| 190 self.assertEqual(crash_data['platform'], analysis.platform) |
| 191 self.assertEqual(crash_data['stack_trace'], analysis.stack_trace) |
| 192 self.assertEqual(crash_data['crashed_version'], analysis.crashed_version) |
| 193 self.assertEqual(crash_data['regression_range'], analysis.regression_range) |
| 189 | 194 |
| 190 self.assertEqual(1, len(analyzed_crashes)) | 195 analysis = FracasCrashAnalysis.Get(crash_data['crash_identifiers']) |
| 191 self.assertEqual( | |
| 192 (signature, platform, stack_trace, chrome_version, None), | |
| 193 analyzed_crashes[0]) | |
| 194 | |
| 195 analysis = FracasCrashAnalysis.Get(crash_identifiers) | |
| 196 self.assertEqual(analysis_result, analysis.result) | 196 self.assertEqual(analysis_result, analysis.result) |
| 197 return analysis | 197 return analysis |
| 198 | 198 |
| 199 | |
| 200 def testRunningAnalysis(self): | 199 def testRunningAnalysis(self): |
| 201 analysis_result = { | 200 analysis_result = { |
| 202 'found': True, | 201 'found': True, |
| 203 'suspected_cls': [], | 202 'suspected_cls': [], |
| 204 'other_data': 'data', | 203 'other_data': 'data', |
| 205 } | 204 } |
| 206 analysis_tags = { | 205 analysis_tags = { |
| 207 'found_suspects': True, | 206 'found_suspects': True, |
| 208 'has_regression_range': True, | 207 'has_regression_range': True, |
| 209 'solution': 'core', | 208 'solution': 'core', |
| (...skipping 38 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... |
| 248 'has_regression_range': True, | 247 'has_regression_range': True, |
| 249 'solution': 'core', | 248 'solution': 'core', |
| 250 'unsupported_tag': '', | 249 'unsupported_tag': '', |
| 251 } | 250 } |
| 252 | 251 |
| 253 analysis = self._TestRunningAnalysisForResult( | 252 analysis = self._TestRunningAnalysisForResult( |
| 254 analysis_result, analysis_tags) | 253 analysis_result, analysis_tags) |
| 255 self.assertTrue(analysis.has_regression_range) | 254 self.assertTrue(analysis.has_regression_range) |
| 256 self.assertTrue(analysis.found_suspects) | 255 self.assertTrue(analysis.found_suspects) |
| 257 self.assertEqual('core', analysis.solution) | 256 self.assertEqual('core', analysis.solution) |
| 258 | |
| 259 def testAnalysisAborted(self): | |
| 260 chrome_version = '1' | |
| 261 signature = 'signature' | |
| 262 platform = 'win' | |
| 263 crash_identifiers = { | |
| 264 'chrome_version': chrome_version, | |
| 265 'signature': signature, | |
| 266 'channel': 'canary', | |
| 267 'platform': platform, | |
| 268 'process_type': 'browser', | |
| 269 } | |
| 270 analysis = FracasCrashAnalysis.Create(crash_identifiers) | |
| 271 analysis.status = analysis_status.RUNNING | |
| 272 analysis.put() | |
| 273 | |
| 274 pipeline = crash_pipeline.CrashAnalysisPipeline(crash_identifiers, 'fracas') | |
| 275 pipeline._SetErrorIfAborted(True) | |
| 276 analysis = FracasCrashAnalysis.Get(crash_identifiers) | |
| 277 self.assertEqual(analysis_status.ERROR, analysis.status) | |
| OLD | NEW |