| OLD | NEW |
| 1 # Copyright 2016 The Chromium Authors. All rights reserved. | 1 # Copyright 2016 The Chromium Authors. All rights reserved. |
| 2 # Use of this source code is governed by a BSD-style license that can be | 2 # Use of this source code is governed by a BSD-style license that can be |
| 3 # found in the LICENSE file. | 3 # found in the LICENSE file. |
| 4 import copy | |
| 5 import json | |
| 6 import logging | |
| 7 | 4 |
| 8 from google.appengine.api import app_identity | |
| 9 from google.appengine.ext import ndb | |
| 10 from webtest.app import AppError | |
| 11 | |
| 12 from common import chrome_dependency_fetcher | |
| 13 from common.pipeline_wrapper import pipeline_handlers | 5 from common.pipeline_wrapper import pipeline_handlers |
| 14 from crash import crash_pipeline | 6 from crash import crash_pipeline |
| 15 from crash.culprit import Culprit | 7 from crash.culprit import Culprit |
| 16 from crash.findit_for_chromecrash import FinditForFracas | 8 from crash.findit_for_chromecrash import FinditForFracas |
| 9 from crash.results import Result |
| 10 from crash.test.crash_testcase import CrashTestCase |
| 17 from crash.type_enums import CrashClient | 11 from crash.type_enums import CrashClient |
| 18 from crash.test.crash_testcase import CrashTestCase | 12 from lib.gitiles.change_log import ChangeLog |
| 19 from model import analysis_status | 13 from model import analysis_status |
| 20 from model.crash.fracas_crash_analysis import FracasCrashAnalysis | 14 from model.crash.fracas_crash_analysis import FracasCrashAnalysis |
| 21 | 15 |
| 22 def DummyCrashData( | 16 def DummyCrashData( |
| 17 client_id=None, |
| 23 version='1', | 18 version='1', |
| 24 signature='signature', | 19 signature='signature', |
| 25 platform='win', | 20 platform='win', |
| 26 stack_trace=None, | 21 stack_trace=None, |
| 27 regression_range=None, | 22 regression_range=None, |
| 28 channel='canary', | 23 channel='canary', |
| 29 historical_metadata=None, | 24 historical_metadata=None, |
| 30 crash_identifiers=True, | 25 crash_identifiers=True, |
| 31 process_type='browser'): | 26 process_type='browser'): |
| 32 if crash_identifiers is True: # pragma: no cover | 27 if crash_identifiers is True: # pragma: no cover |
| 33 crash_identifiers = { | 28 crash_identifiers = { |
| 34 'chrome_version': version, | 29 'chrome_version': version, |
| 35 'signature': signature, | 30 'signature': signature, |
| 36 'channel': channel, | 31 'channel': channel, |
| 37 'platform': platform, | 32 'platform': platform, |
| 38 'process_type': process_type, | 33 'process_type': process_type, |
| 39 } | 34 } |
| 40 return { | 35 crash_data = { |
| 41 'crashed_version': version, | 36 'crashed_version': version, |
| 42 'signature': signature, | 37 'signature': signature, |
| 43 'platform': platform, | 38 'platform': platform, |
| 44 'stack_trace': stack_trace, | 39 'stack_trace': stack_trace, |
| 45 'regression_range': regression_range, | 40 'regression_range': regression_range, |
| 46 'crash_identifiers': crash_identifiers, | 41 'crash_identifiers': crash_identifiers, |
| 47 'customized_data': { | 42 'customized_data': { |
| 48 'historical_metadata': historical_metadata, | 43 'historical_metadata': historical_metadata, |
| 49 'channel': channel, | 44 'channel': channel, |
| 50 }, | 45 }, |
| 51 } | 46 } |
| 52 | 47 # This insertion of client_id is used for debugging ScheduleNewAnalysis. |
| 53 | 48 if client_id is not None: # pragma: no cover |
| 54 class MockCulprit(object): | 49 crash_data['client_id'] = client_id |
| 55 """Construct a fake culprit where ``ToDicts`` returns whatever we please.""" | 50 return crash_data |
| 56 | |
| 57 def __init__(self, mock_result, mock_tags): | |
| 58 self._result = mock_result | |
| 59 self._tags = mock_tags | |
| 60 | |
| 61 def ToDicts(self): # pragma: no cover | |
| 62 return self._result, self._tags | |
| 63 | 51 |
| 64 | 52 |
| 65 class CrashPipelineTest(CrashTestCase): | 53 class CrashPipelineTest(CrashTestCase): |
| 66 app_module = pipeline_handlers._APP | 54 app_module = pipeline_handlers._APP |
| 67 | 55 |
| 68 def testAnalysisAborted(self): | 56 def testAnalysisAborted(self): |
| 69 crash_identifiers = DummyCrashData()['crash_identifiers'] | 57 crash_identifiers = DummyCrashData()['crash_identifiers'] |
| 70 analysis = FracasCrashAnalysis.Create(crash_identifiers) | 58 analysis = FracasCrashAnalysis.Create(crash_identifiers) |
| 71 analysis.status = analysis_status.RUNNING | 59 analysis.status = analysis_status.RUNNING |
| 72 analysis.put() | 60 analysis.put() |
| (...skipping 17 matching lines...) Expand all Loading... |
| 90 crash_identifiers) | 78 crash_identifiers) |
| 91 pipeline.run() | 79 pipeline.run() |
| 92 | 80 |
| 93 analysis = FracasCrashAnalysis.Get(crash_identifiers) | 81 analysis = FracasCrashAnalysis.Get(crash_identifiers) |
| 94 self.assertEqual(analysis_status.COMPLETED, analysis.status) | 82 self.assertEqual(analysis_status.COMPLETED, analysis.status) |
| 95 self.assertFalse(analysis.result['found']) | 83 self.assertFalse(analysis.result['found']) |
| 96 self.assertFalse(analysis.found_suspects) | 84 self.assertFalse(analysis.found_suspects) |
| 97 self.assertFalse(analysis.found_project) | 85 self.assertFalse(analysis.found_project) |
| 98 self.assertFalse(analysis.found_components) | 86 self.assertFalse(analysis.found_components) |
| 99 | 87 |
| 100 # TODO: this function is a gross hack. We should figure out what the | 88 def testFindCulpritSucceeds(self): |
| 101 # semantic goal really is here, so we can avoid doing such intricate | 89 crash_identifiers = DummyCrashData()['crash_identifiers'] |
| 102 # and fragile mocking. | 90 analysis = FracasCrashAnalysis.Create(crash_identifiers) |
| 103 def _TestRunningAnalysisForResult(self, analysis_result, analysis_tags): | 91 analysis.status = analysis_status.RUNNING |
| 92 analysis.put() |
| 104 | 93 |
| 105 # Mock out the part of PublishResultPipeline that would go over the wire. | 94 dummy_cl = ChangeLog( |
| 106 pubsub_publish_requests = [] | 95 'AUTHOR_NAME', |
| 107 def Mocked_PublishMessagesToTopic(messages_data, topic): | 96 'AUTHOR_EMAIL', |
| 108 pubsub_publish_requests.append((messages_data, topic)) | 97 'AUTHOR_TIME', |
| 109 self.mock(crash_pipeline.pubsub_util, 'PublishMessagesToTopic', | 98 'COMMITTER_NAME', |
| 110 Mocked_PublishMessagesToTopic) | 99 'COMMITTER_EMAIL', |
| 100 'COMMITTER_TIME', |
| 101 'REVISION', |
| 102 'COMMIT_POSITION', |
| 103 'MESSAGE', |
| 104 'TOUCHED_FILES', |
| 105 'COMMIT_URL', |
| 106 ) |
| 107 dummy_project_path = 'PROJECT_PATH' |
| 108 dummy_result = Result(dummy_cl, dummy_project_path) |
| 109 dummy_culprit = Culprit( |
| 110 project = 'PROJECT', |
| 111 components = ['COMPONENT_1', 'COMPONENT_2'], |
| 112 cls = [dummy_result], |
| 113 # N.B., we must use a list here for the assertion to work |
| 114 # TODO(wrengr): fix that. |
| 115 regression_range = ['VERSION_0', 'VERSION_1'], |
| 116 algorithm = 'ALGORITHM', |
| 117 ) |
| 118 self.mock(FinditForFracas, 'FindCulprit', lambda *_: dummy_culprit) |
| 119 pipeline = crash_pipeline.CrashAnalysisPipeline( |
| 120 CrashClient.FRACAS, |
| 121 crash_identifiers) |
| 122 pipeline.run() |
| 111 | 123 |
| 112 MOCK_HOST = 'https://host.com' | 124 analysis = FracasCrashAnalysis.Get(crash_identifiers) |
| 113 self.mock(app_identity, 'get_default_version_hostname', lambda: MOCK_HOST) | 125 self.assertEqual(analysis_status.COMPLETED, analysis.status) |
| 114 | 126 self.assertTrue(analysis.result['found']) |
| 115 testcase = self | |
| 116 MOCK_KEY = 'MOCK_KEY' | |
| 117 | |
| 118 # Mock out the wrapper pipeline, calling the other pipelines directly. | |
| 119 class _MockPipeline(crash_pipeline.CrashWrapperPipeline): | |
| 120 def start(self, **kwargs): | |
| 121 logging.info('Mock running on queue %s', kwargs['queue_name']) | |
| 122 analysis_pipeline = crash_pipeline.CrashAnalysisPipeline( | |
| 123 self._client_id, self._crash_identifiers) | |
| 124 analysis_pipeline.run() | |
| 125 analysis_pipeline.finalized() | |
| 126 | |
| 127 testcase.mock(ndb.Key, 'urlsafe', lambda _self: MOCK_KEY) | |
| 128 publish_pipeline = crash_pipeline.PublishResultPipeline( | |
| 129 self._client_id, self._crash_identifiers) | |
| 130 publish_pipeline.run() | |
| 131 publish_pipeline.finalized() | |
| 132 | |
| 133 # Mock out FindCulprit to track the number of times it's called and | |
| 134 # with which arguments. N.B., the pipeline will reconstruct Findit | |
| 135 # objects form their client_id, so we can't mock via subclassing, | |
| 136 # we must mock via ``self.mock``. | |
| 137 mock_culprit = MockCulprit(analysis_result, analysis_tags) | |
| 138 analyzed_crashes = [] | |
| 139 def _MockFindCulprit(_self, model): | |
| 140 analyzed_crashes.append(model) | |
| 141 return mock_culprit | |
| 142 self.mock(FinditForFracas, 'FindCulprit', _MockFindCulprit) | |
| 143 | |
| 144 # The real ``ParseStacktrace`` calls ``GetChromeDependency``, | |
| 145 # which eventually calls ``GitRepository.GetSource`` and hence | |
| 146 # goes over the wire. Since we mocked out ``FindCulprit`` to no | |
| 147 # longer call ``ParseStacktrace``, it shouldn't matter what the real | |
| 148 # ``ParseStacktrace`` does. However, since mocking is fragile and it's | |
| 149 # hard to triage what actually went wrong if we do end up going over | |
| 150 # the wire, we mock this out too just to be safe. | |
| 151 def _MockParseStacktrace(_self, _model): | |
| 152 raise AssertionError("ParseStacktrace shouldn't ever be called. " | |
| 153 'That it was indicates some sort of problem with our mocking code.') | |
| 154 self.mock(FinditForFracas, 'ParseStacktrace', _MockParseStacktrace) | |
| 155 | |
| 156 # More directly address the issue about ``GetChromeDependency`` going | |
| 157 # over the wire. | |
| 158 def _MockGetChromeDependency(_self, _revision, _platform): | |
| 159 raise AssertionError("GetChromeDependency shouldn't ever be called. " | |
| 160 'That it was indicates some sort of problem with our mocking code.') | |
| 161 self.mock(chrome_dependency_fetcher.ChromeDependencyFetcher, | |
| 162 'GetDependency', _MockGetChromeDependency) | |
| 163 | |
| 164 crash_data = DummyCrashData( | |
| 165 version = '50.2500.0.1', | |
| 166 stack_trace = 'frame1\nframe2\nframe3') | |
| 167 # A fake repository, needed by the Findit constructor. We should never | |
| 168 # go over the wire (e.g., in the call to ScheduleNewAnalysis below), | |
| 169 # and this helps ensure that. (The current version of the tests | |
| 170 # don't seem to need the repo at all, so None is a sufficient mock | |
| 171 # for now.) | |
| 172 mock_repository = None | |
| 173 self.assertTrue( | |
| 174 FinditForFracas(mock_repository, _MockPipeline).ScheduleNewAnalysis( | |
| 175 crash_data)) | |
| 176 | |
| 177 # The catch/re-raise is to clean up the callstack that's reported | |
| 178 # when things acciddentally go over the wire (and subsequently fail). | |
| 179 try: | |
| 180 self.execute_queued_tasks() | |
| 181 except AppError, e: # pragma: no cover | |
| 182 raise e | |
| 183 | |
| 184 self.assertEqual(1, len(pubsub_publish_requests)) | |
| 185 | |
| 186 processed_analysis_result = copy.deepcopy(analysis_result) | |
| 187 processed_analysis_result['feedback_url'] = ( | |
| 188 '%s/crash/fracas-result-feedback?key=%s' % (MOCK_HOST, MOCK_KEY)) | |
| 189 | |
| 190 for cl in processed_analysis_result.get('suspected_cls', []): | |
| 191 cl['confidence'] = round(cl['confidence'], 2) | |
| 192 cl.pop('reason', None) | |
| 193 | |
| 194 expected_messages_data = [json.dumps({ | |
| 195 'crash_identifiers': crash_data['crash_identifiers'], | |
| 196 'client_id': CrashClient.FRACAS, | |
| 197 'result': processed_analysis_result, | |
| 198 }, sort_keys=True)] | |
| 199 self.assertListEqual(expected_messages_data, pubsub_publish_requests[0][0]) | |
| 200 self.assertEqual(1, len(analyzed_crashes)) | |
| 201 analysis = analyzed_crashes[0] | |
| 202 self.assertTrue(isinstance(analysis, FracasCrashAnalysis)) | |
| 203 self.assertEqual(crash_data['signature'], analysis.signature) | |
| 204 self.assertEqual(crash_data['platform'], analysis.platform) | |
| 205 self.assertEqual(crash_data['stack_trace'], analysis.stack_trace) | |
| 206 self.assertEqual(crash_data['crashed_version'], analysis.crashed_version) | |
| 207 self.assertEqual(crash_data['regression_range'], analysis.regression_range) | |
| 208 | |
| 209 analysis = FracasCrashAnalysis.Get(crash_data['crash_identifiers']) | |
| 210 self.assertEqual(analysis_result, analysis.result) | |
| 211 return analysis | |
| 212 | |
| 213 def testRunningAnalysis(self): | |
| 214 analysis_result = { | |
| 215 'found': True, | |
| 216 'suspected_cls': [], | |
| 217 'other_data': 'data', | |
| 218 } | |
| 219 analysis_tags = { | |
| 220 'found_suspects': True, | |
| 221 'has_regression_range': True, | |
| 222 'solution': 'core', | |
| 223 'unsupported_tag': '', | |
| 224 } | |
| 225 | |
| 226 analysis = self._TestRunningAnalysisForResult( | |
| 227 analysis_result, analysis_tags) | |
| 228 self.assertTrue(analysis.has_regression_range) | |
| 229 self.assertTrue(analysis.found_suspects) | 127 self.assertTrue(analysis.found_suspects) |
| 230 self.assertEqual('core', analysis.solution) | 128 self.assertTrue(analysis.found_project) |
| 231 | 129 self.assertTrue(analysis.found_components) |
| 232 def testRunningAnalysisNoSuspectsFound(self): | 130 dummy_result, dummy_tags = dummy_culprit.ToDicts() |
| 233 analysis_result = { | 131 self.assertDictEqual(analysis.result, dummy_result) |
| 234 'found': False | |
| 235 } | |
| 236 analysis_tags = { | |
| 237 'found_suspects': False, | |
| 238 'has_regression_range': False, | |
| 239 'solution': 'core', | |
| 240 'unsupported_tag': '', | |
| 241 } | |
| 242 | |
| 243 analysis = self._TestRunningAnalysisForResult( | |
| 244 analysis_result, analysis_tags) | |
| 245 self.assertFalse(analysis.has_regression_range) | |
| 246 self.assertFalse(analysis.found_suspects) | |
| 247 self.assertEqual('core', analysis.solution) | |
| 248 | |
| 249 def testRunningAnalysisWithSuspectsCls(self): | |
| 250 analysis_result = { | |
| 251 'found': True, | |
| 252 'suspected_cls': [ | |
| 253 {'confidence': 0.21434, | |
| 254 'reason': ['reason1', 'reason2'], | |
| 255 'other': 'data'} | |
| 256 ], | |
| 257 'other_data': 'data', | |
| 258 } | |
| 259 analysis_tags = { | |
| 260 'found_suspects': True, | |
| 261 'has_regression_range': True, | |
| 262 'solution': 'core', | |
| 263 'unsupported_tag': '', | |
| 264 } | |
| 265 | |
| 266 analysis = self._TestRunningAnalysisForResult( | |
| 267 analysis_result, analysis_tags) | |
| 268 self.assertTrue(analysis.has_regression_range) | |
| 269 self.assertTrue(analysis.found_suspects) | |
| 270 self.assertEqual('core', analysis.solution) | |
| OLD | NEW |