Chromium Code Reviews| OLD | NEW |
|---|---|
| 1 # Copyright 2016 The Chromium Authors. All rights reserved. | 1 # Copyright 2016 The Chromium Authors. All rights reserved. |
| 2 # Use of this source code is governed by a BSD-style license that can be | 2 # Use of this source code is governed by a BSD-style license that can be |
| 3 # found in the LICENSE file. | 3 # found in the LICENSE file. |
| 4 import copy | |
| 5 import json | |
| 6 import logging | |
| 7 | 4 |
| 8 from google.appengine.api import app_identity | |
| 9 from google.appengine.ext import ndb | |
| 10 from webtest.app import AppError | |
| 11 | |
| 12 from common import chrome_dependency_fetcher | |
| 13 from common.pipeline_wrapper import pipeline_handlers | 5 from common.pipeline_wrapper import pipeline_handlers |
| 14 from crash import crash_pipeline | 6 from crash import crash_pipeline |
| 15 from crash.culprit import Culprit | |
| 16 from crash.findit_for_chromecrash import FinditForFracas | |
| 17 from crash.type_enums import CrashClient | 7 from crash.type_enums import CrashClient |
| 18 from crash.test.crash_testcase import CrashTestCase | 8 from crash.test.crash_testcase import CrashTestCase |
| 19 from model import analysis_status | 9 from model import analysis_status |
| 20 from model.crash.fracas_crash_analysis import FracasCrashAnalysis | 10 from model.crash.fracas_crash_analysis import FracasCrashAnalysis |
| 21 | 11 |
def DummyCrashData(
    client_id=None,
    version='1',
    signature='signature',
    platform='win',
    stack_trace=None,
    regression_range=None,
    channel='canary',
    historical_metadata=None,
    crash_identifiers=True,
    process_type='browser'):
  """Builds a fake crash-report dict for use in tests.

  If ``crash_identifiers`` is left at its default (the literal ``True``,
  used as a sentinel), a plausible identifiers dict is synthesized from
  the other arguments; any other value — including falsy ones — is
  passed through unchanged.
  """
  if crash_identifiers is True:  # pragma: no cover
    crash_identifiers = {
        'chrome_version': version,
        'signature': signature,
        'channel': channel,
        'platform': platform,
        'process_type': process_type,
    }
  dummy_data = {
      'crashed_version': version,
      'signature': signature,
      'platform': platform,
      'stack_trace': stack_trace,
      'regression_range': regression_range,
      'crash_identifiers': crash_identifiers,
      'customized_data': {
          'historical_metadata': historical_metadata,
          'channel': channel,
      },
  }
  # This insertion of client_id is used for debugging ScheduleNewAnalysis.
  if client_id is not None:  # pragma: no cover
    dummy_data['client_id'] = client_id
  return dummy_data
class MockCulprit(object):
  """Fake culprit whose |ToDicts| yields canned (result, tags) values."""

  def __init__(self, mock_result, mock_tags):
    self._mock_result = mock_result
    self._mock_tags = mock_tags

  def ToDicts(self):  # pragma: no cover
    return self._mock_result, self._mock_tags
| 63 | 47 |
| 64 | 48 |
class CrashPipelineTest(CrashTestCase):
  app_module = pipeline_handlers._APP

  def testAnalysisAborted(self):
    """An aborted pipeline marks its analysis entity as ERROR."""
    identifiers = DummyCrashData()['crash_identifiers']
    analysis = FracasCrashAnalysis.Create(identifiers)
    analysis.status = analysis_status.RUNNING
    analysis.put()

    pipeline = crash_pipeline.CrashAnalysisPipeline(
        CrashClient.FRACAS, identifiers)
    pipeline._PutAbortedError()

    reloaded = FracasCrashAnalysis.Get(identifiers)
    self.assertEqual(analysis_status.ERROR, reloaded.status)
| 80 | |
| 81 | |
  # TODO: this function is a gross hack. We should figure out what the
  # semantic goal really is here, so we can avoid doing such intricate
  # and fragile mocking.
  def _TestRunningAnalysisForResult(self, analysis_result, analysis_tags):
    """Runs a full mocked Fracas analysis and checks what gets published.

    Schedules a new analysis via FinditForFracas.ScheduleNewAnalysis with
    every over-the-wire dependency mocked out, executes the queued tasks,
    and asserts that exactly one pubsub message carrying
    |analysis_result| was published and that the analysis entity was
    populated from the dummy crash data.

    Args:
      analysis_result: dict returned (as the first element) by the mocked
          FindCulprit; stored on the analysis and published to pubsub.
      analysis_tags: dict returned (as the second element) by the mocked
          FindCulprit.
    Returns:
      The FracasCrashAnalysis entity fetched back from the datastore.
    """

    # Mock out the part of PublishResultPipeline that would go over the wire.
    pubsub_publish_requests = []
    def Mocked_PublishMessagesToTopic(messages_data, topic):
      pubsub_publish_requests.append((messages_data, topic))
    self.mock(crash_pipeline.pubsub_util, 'PublishMessagesToTopic',
        Mocked_PublishMessagesToTopic)

    # Fixed hostname/key so the expected feedback_url below is predictable.
    MOCK_HOST = 'https://host.com'
    self.mock(app_identity, 'get_default_version_hostname', lambda: MOCK_HOST)

    # Captured for use inside _MockPipeline.start, whose |self| is the
    # pipeline, not this test case.
    testcase = self
    MOCK_KEY = 'MOCK_KEY'

    # Mock out the wrapper pipeline, calling the other pipelines directly.
    class _MockPipeline(crash_pipeline.CrashWrapperPipeline):
      def start(self, **kwargs):
        logging.info('Mock running on queue %s', kwargs['queue_name'])
        analysis_pipeline = crash_pipeline.CrashAnalysisPipeline(
            self._client_id, self._crash_identifiers)
        analysis_pipeline.run()
        analysis_pipeline.finalized()

        testcase.mock(ndb.Key, 'urlsafe', lambda _self: MOCK_KEY)
        publish_pipeline = crash_pipeline.PublishResultPipeline(
            self._client_id, self._crash_identifiers)
        publish_pipeline.run()
        publish_pipeline.finalized()

    # Mock out FindCulprit to track the number of times it's called and
    # with which arguments. N.B., the pipeline will reconstruct Findit
    # objects form their client_id, so we can't mock via subclassing,
    # we must mock via |self.mock|.
    mock_culprit = MockCulprit(analysis_result, analysis_tags)
    analyzed_crashes = []
    def _MockFindCulprit(_self, model):
      analyzed_crashes.append(model)
      return mock_culprit
    self.mock(FinditForFracas, 'FindCulprit', _MockFindCulprit)

    # The real |ParseStacktrace| calls |GetChromeDependency|, which
    # eventually calls |GitRepository.GetSource| and hence goes over
    # the wire. Since we mocked out |FindCulprit| to no longer call
    # |ParseStacktrace|, it shouldn't matter what the real
    # |ParseStacktrace| does. However, since mocking is fragile and it's
    # hard to triage what actually went wrong if we do end up going over
    # the wire, we mock this out too just to be safe.
    def _MockParseStacktrace(_self, _model):
      raise AssertionError("ParseStacktrace shouldn't ever be called. "
          'That it was indicates some sort of problem with our mocking code.')
    self.mock(FinditForFracas, 'ParseStacktrace', _MockParseStacktrace)

    # More directly address the issue about |GetChromeDependency| going
    # over the wire.
    def _MockGetChromeDependency(_self, _revision, _platform):
      raise AssertionError("GetChromeDependency shouldn't ever be called. "
          'That it was indicates some sort of problem with our mocking code.')
    self.mock(chrome_dependency_fetcher.ChromeDependencyFetcher,
        'GetDependency', _MockGetChromeDependency)

    crash_data = DummyCrashData(
        version = '50.2500.0.1',
        stack_trace = 'frame1\nframe2\nframe3')
    # A fake repository, needed by the Findit constructor. We should never
    # go over the wire (e.g., in the call to ScheduleNewAnalysis below),
    # and this helps ensure that. (The current version of the tests
    # don't seem to need the repo at all, so None is a sufficient mock
    # for now.)
    mock_repository = None
    self.assertTrue(
        FinditForFracas(mock_repository, _MockPipeline).ScheduleNewAnalysis(
            crash_data))

    # The catch/re-raise is to clean up the callstack that's reported
    # when things acciddentally go over the wire (and subsequently fail).
    try:
      self.execute_queued_tasks()
    except AppError, e:  # pragma: no cover
      raise e

    self.assertEqual(1, len(pubsub_publish_requests))

    # Reproduce the post-processing PublishResultPipeline applies before
    # publishing: attach the feedback URL, round CL confidences to two
    # decimal places, and drop the (internal-only) 'reason' field.
    processed_analysis_result = copy.deepcopy(analysis_result)
    processed_analysis_result['feedback_url'] = (
        '%s/crash/fracas-result-feedback?key=%s' % (MOCK_HOST, MOCK_KEY))

    for cl in processed_analysis_result.get('suspected_cls', []):
      cl['confidence'] = round(cl['confidence'], 2)
      cl.pop('reason', None)

    expected_messages_data = [json.dumps({
        'crash_identifiers': crash_data['crash_identifiers'],
        'client_id': CrashClient.FRACAS,
        'result': processed_analysis_result,
    }, sort_keys=True)]
    self.assertListEqual(expected_messages_data, pubsub_publish_requests[0][0])
    self.assertEqual(1, len(analyzed_crashes))
    analysis = analyzed_crashes[0]
    self.assertTrue(isinstance(analysis, FracasCrashAnalysis))
    self.assertEqual(crash_data['signature'], analysis.signature)
    self.assertEqual(crash_data['platform'], analysis.platform)
    self.assertEqual(crash_data['stack_trace'], analysis.stack_trace)
    self.assertEqual(crash_data['crashed_version'], analysis.crashed_version)
    self.assertEqual(crash_data['regression_range'], analysis.regression_range)

    analysis = FracasCrashAnalysis.Get(crash_data['crash_identifiers'])
    self.assertEqual(analysis_result, analysis.result)
    return analysis
| 194 | |
| 195 def testRunningAnalysis(self): | |
| 196 analysis_result = { | |
| 197 'found': True, | |
| 198 'suspected_cls': [], | |
| 199 'other_data': 'data', | |
| 200 } | |
| 201 analysis_tags = { | |
| 202 'found_suspects': True, | |
| 203 'has_regression_range': True, | |
| 204 'solution': 'core', | |
| 205 'unsupported_tag': '', | |
| 206 } | |
| 207 | |
| 208 analysis = self._TestRunningAnalysisForResult( | |
| 209 analysis_result, analysis_tags) | |
| 210 self.assertTrue(analysis.has_regression_range) | |
| 211 self.assertTrue(analysis.found_suspects) | |
| 212 self.assertEqual('core', analysis.solution) | |
| 213 | |
| 214 def testRunningAnalysisNoSuspectsFound(self): | |
| 215 analysis_result = { | |
| 216 'found': False | |
| 217 } | |
| 218 analysis_tags = { | |
| 219 'found_suspects': False, | |
| 220 'has_regression_range': False, | |
| 221 'solution': 'core', | |
| 222 'unsupported_tag': '', | |
| 223 } | |
| 224 | |
| 225 analysis = self._TestRunningAnalysisForResult( | |
| 226 analysis_result, analysis_tags) | |
| 227 self.assertFalse(analysis.has_regression_range) | |
| 228 self.assertFalse(analysis.found_suspects) | |
| 229 self.assertEqual('core', analysis.solution) | |
| 230 | |
| 231 def testRunningAnalysisWithSuspectsCls(self): | |
| 232 analysis_result = { | |
| 233 'found': True, | |
| 234 'suspected_cls': [ | |
| 235 {'confidence': 0.21434, | |
| 236 'reason': ['reason1', 'reason2'], | |
| 237 'other': 'data'} | |
| 238 ], | |
| 239 'other_data': 'data', | |
| 240 } | |
| 241 analysis_tags = { | |
| 242 'found_suspects': True, | |
| 243 'has_regression_range': True, | |
| 244 'solution': 'core', | |
| 245 'unsupported_tag': '', | |
| 246 } | |
| 247 | |
| 248 analysis = self._TestRunningAnalysisForResult( | |
| 249 analysis_result, analysis_tags) | |
| 250 self.assertTrue(analysis.has_regression_range) | |
| 251 self.assertTrue(analysis.found_suspects) | |
| 252 self.assertEqual('core', analysis.solution) | |
| OLD | NEW |