| OLD | NEW |
| 1 # Copyright 2015 The Chromium Authors. All rights reserved. | 1 # Copyright 2015 The Chromium Authors. All rights reserved. |
| 2 # Use of this source code is governed by a BSD-style license that can be | 2 # Use of this source code is governed by a BSD-style license that can be |
| 3 # found in the LICENSE file. | 3 # found in the LICENSE file. |
| 4 | 4 |
| 5 import base64 | 5 import base64 |
| 6 import copy | 6 import copy |
| 7 import json | 7 import json |
| 8 import logging | 8 import logging |
| 9 | 9 |
| 10 from google.appengine.api import app_identity | 10 from google.appengine.api import app_identity |
| 11 from google.appengine.ext import ndb | 11 from google.appengine.ext import ndb |
| 12 import webapp2 | 12 import webapp2 |
| 13 from webtest.app import AppError | 13 from webtest.app import AppError |
| 14 | 14 |
| 15 from common import chrome_dependency_fetcher | 15 from common import chrome_dependency_fetcher |
| 16 from crash import crash_pipeline | 16 from crash import crash_pipeline |
| 17 from crash.crash_pipeline import CrashWrapperPipeline |
| 17 from crash.findit import Findit | 18 from crash.findit import Findit |
| 18 from crash.findit_for_chromecrash import FinditForFracas | 19 from crash.findit_for_chromecrash import FinditForFracas |
| 19 from crash.test.predator_testcase import PredatorTestCase | 20 from crash.test.predator_testcase import PredatorTestCase |
| 20 from crash.type_enums import CrashClient | 21 from crash.type_enums import CrashClient |
| 21 from handlers.crash import crash_handler | 22 from handlers.crash import crash_handler |
| 22 from libs.gitiles import gitiles_repository | 23 from libs.gitiles import gitiles_repository |
| 23 from model import analysis_status | 24 from model import analysis_status |
| 25 from model.crash.crash_analysis import CrashAnalysis |
| 24 from model.crash.crash_config import CrashConfig | 26 from model.crash.crash_config import CrashConfig |
| 25 from model.crash.fracas_crash_analysis import FracasCrashAnalysis | |
| 26 | |
| 27 | |
| 28 MOCK_GET_REPOSITORY = lambda _: None # pragma: no cover | |
| 29 | |
| 30 | |
| 31 class MockCulprit(object): | |
| 32 """Construct a fake culprit where ``ToDicts`` returns whatever we please.""" | |
| 33 | |
| 34 def __init__(self, mock_result, mock_tags): | |
| 35 self._result = mock_result | |
| 36 self._tags = mock_tags | |
| 37 | |
| 38 def ToDicts(self): # pragma: no cover | |
| 39 return self._result, self._tags | |
| 40 | 27 |
| 41 | 28 |
| 42 class CrashHandlerTest(PredatorTestCase): | 29 class CrashHandlerTest(PredatorTestCase): |
| 43 app_module = webapp2.WSGIApplication([ | 30 app_module = webapp2.WSGIApplication([ |
| 44 ('/_ah/push-handlers/crash/fracas', crash_handler.CrashHandler), | 31 ('/_ah/push-handlers/crash/fracas', crash_handler.CrashHandler), |
| 45 ], debug=True) | 32 ], debug=True) |
| 46 | 33 |
| 47 def testScheduleNewAnalysisWithFailingPolicy(self): | 34 def testDoNotScheduleNewAnalysisIfNeedsNewAnalysisReturnsFalse(self): |
| 48 mock_findit = self.GetMockFindit() | 35 mock_findit = self.GetMockFindit() |
| 49 self.mock(mock_findit, 'CheckPolicy', lambda *_: None) | 36 self.mock(mock_findit, 'NeedsNewAnalysis', lambda _: False) |
| 50 self.mock(crash_pipeline, 'FinditForClientID', lambda *_: mock_findit) | 37 self.mock(crash_pipeline, 'FinditForClientID', lambda *_: mock_findit) |
| 51 self.assertFalse(crash_handler.ScheduleNewAnalysis(self.GetDummyCrashData( | 38 # Check policy failed due to empty client config. |
| 52 client_id = 'MOCK_CLIENT'))) | 39 self.assertFalse(crash_handler.ScheduleNewAnalysis( |
| 40 self.GetDummyChromeCrashData())) |
| 53 | 41 |
| 54 def testScheduleNewAnalysisWithPlatformRename(self): | 42 def testScheduleNewAnalysisIfNeedsNewAnalysisReturnsTrue(self): |
| 55 original_crash_data = self.GetDummyCrashData( | 43 mock_findit = self.GetMockFindit(client_id=CrashClient.FRACAS) |
| 56 client_id = 'MOCK_CLIENT', | 44 self.mock(mock_findit, 'NeedsNewAnalysis', lambda _: True) |
| 57 version = None, | 45 self.mock(crash_pipeline, 'FinditForClientID', lambda *_: mock_findit) |
| 58 platform = 'unix', | 46 self.assertTrue(crash_handler.ScheduleNewAnalysis( |
| 59 crash_identifiers = {}) | 47 self.GetDummyChromeCrashData(client_id=CrashClient.FRACAS))) |
| 60 renamed_crash_data = copy.deepcopy(original_crash_data) | |
| 61 renamed_crash_data['platform'] = 'linux' | |
| 62 | 48 |
| 63 self.mock(crash_pipeline, 'FinditForClientID', | 49 def testHandlePostScheduleNewAnalysis(self): |
| 64 lambda *_: self.GetMockFindit(client_id='fracas')) | 50 chrome_version = '50.2500.0.0' |
| 65 self.assertFalse(crash_handler.ScheduleNewAnalysis(original_crash_data)) | |
| 66 | |
| 67 def testScheduleNewAnalysisSkipsUnsupportedChannel(self): | |
| 68 self.assertFalse(crash_handler.ScheduleNewAnalysis(self.GetDummyCrashData( | |
| 69 client_id = CrashClient.FRACAS, | |
| 70 version = None, | |
| 71 signature = None, | |
| 72 crash_identifiers = {}, | |
| 73 channel = 'unsupported_channel'))) | |
| 74 | |
| 75 def testScheduleNewAnalysisSkipsUnsupportedPlatform(self): | |
| 76 self.assertFalse(crash_handler.ScheduleNewAnalysis(self.GetDummyCrashData( | |
| 77 client_id = CrashClient.FRACAS, | |
| 78 version = None, | |
| 79 signature = None, | |
| 80 platform = 'unsupported_platform', | |
| 81 crash_identifiers = {}))) | |
| 82 | |
| 83 def testScheduleNewAnalysisSkipsBlackListSignature(self): | |
| 84 self.assertFalse(crash_handler.ScheduleNewAnalysis(self.GetDummyCrashData( | |
| 85 client_id = CrashClient.FRACAS, | |
| 86 version = None, | |
| 87 signature = 'Blacklist marker signature', | |
| 88 crash_identifiers = {}))) | |
| 89 | |
| 90 def testScheduleNewAnalysisSkipsIfAlreadyCompleted(self): | |
| 91 findit_client = FinditForFracas(MOCK_GET_REPOSITORY, CrashConfig.Get()) | |
| 92 crash_data = self.GetDummyCrashData(client_id = findit_client.client_id) | |
| 93 crash_identifiers = crash_data['crash_identifiers'] | |
| 94 analysis = findit_client.CreateAnalysis(crash_identifiers) | |
| 95 analysis.status = analysis_status.COMPLETED | |
| 96 analysis.put() | |
| 97 self.assertFalse(crash_handler.ScheduleNewAnalysis(crash_data)) | |
| 98 | |
| 99 def testAnalysisScheduled(self): | |
| 100 # We need to mock out the method on Findit itself (rather than using a | |
| 101 # subclass), since this method only gets called on objects we | |
| 102 # ourselves don't construct. | |
| 103 requested_crashes = [] | |
| 104 def _MockScheduleNewAnalysis(crash_data): | |
| 105 requested_crashes.append(crash_data) | |
| 106 self.mock(crash_handler, 'ScheduleNewAnalysis', _MockScheduleNewAnalysis) | |
| 107 | |
| 108 self.mock_current_user(user_email='test@chromium.org', is_admin=True) | |
| 109 | |
| 110 channel = 'supported_channel' | |
| 111 platform = 'supported_platform' | |
| 112 signature = 'signature/here' | 51 signature = 'signature/here' |
| 113 chrome_version = '50.2500.0.0' | 52 channel = 'canary' |
| 114 crash_data = { | 53 platform = 'mac' |
| 115 'client_id': 'fracas', | 54 crash_data = self.GetDummyChromeCrashData( |
| 116 'platform': platform, | 55 client_id=CrashClient.FRACAS, |
| 117 'signature': signature, | 56 channel=channel, platform=platform, |
| 118 'stack_trace': 'frame1\nframe2\nframe3', | 57 signature=signature, version=chrome_version, |
| 119 'chrome_version': chrome_version, | 58 process_type='renderer') |
| 120 'crash_identifiers': { | |
| 121 'chrome_version': chrome_version, | |
| 122 'signature': signature, | |
| 123 'channel': channel, | |
| 124 'platform': platform, | |
| 125 'process_type': 'renderer', | |
| 126 }, | |
| 127 'customized_data': { | |
| 128 'channel': channel, | |
| 129 'historical_metadata': | |
| 130 [{'chrome_version': chrome_version, 'cpm': 0.6}], | |
| 131 }, | |
| 132 } | |
| 133 | 59 |
| 134 request_json_data = { | 60 request_json_data = { |
| 135 'message': { | 61 'message': { |
| 136 'data': base64.b64encode(json.dumps(crash_data)), | 62 'data': base64.b64encode(json.dumps(crash_data)), |
| 137 'message_id': 'id', | 63 'message_id': 'id', |
| 138 }, | 64 }, |
| 139 'subscription': 'subscription', | 65 'subscription': 'subscription', |
| 140 } | 66 } |
| 141 | 67 |
| 68 self.MockPipeline( |
| 69 CrashWrapperPipeline, True, |
| 70 (crash_data['client_id'], crash_data['crash_identifiers'])) |
| 71 self.mock(CrashAnalysis, 'Initialize', lambda *_: None) |
| 72 |
| 142 self.test_app.post_json('/_ah/push-handlers/crash/fracas', | 73 self.test_app.post_json('/_ah/push-handlers/crash/fracas', |
| 143 request_json_data) | 74 request_json_data) |
| 144 | |
| 145 self.assertEqual(1, len(requested_crashes)) | |
| 146 self.assertEqual(crash_data, requested_crashes[0]) | |
| 147 | |
| 148 # TODO: this function is a gross hack. We should figure out what the | |
| 149 # semantic goal really is here, so we can avoid doing such intricate | |
| 150 # and fragile mocking. | |
| 151 def _TestRunningAnalysisForResult(self, analysis_result, analysis_tags): | |
| 152 | |
| 153 # Mock out the part of PublishResultPipeline that would go over the wire. | |
| 154 pubsub_publish_requests = [] | |
| 155 def Mocked_PublishMessagesToTopic(messages_data, topic): | |
| 156 pubsub_publish_requests.append((messages_data, topic)) | |
| 157 self.mock(crash_pipeline.pubsub_util, 'PublishMessagesToTopic', | |
| 158 Mocked_PublishMessagesToTopic) | |
| 159 | |
| 160 MOCK_HOST = 'host.com' | |
| 161 self.mock(app_identity, 'get_default_version_hostname', lambda: MOCK_HOST) | |
| 162 | |
| 163 testcase = self | |
| 164 MOCK_KEY = 'MOCK_KEY' | |
| 165 | |
| 166 # Mock out the wrapper pipeline, so we call the other pipelines directly | |
| 167 # instead of doing the yielding loop and spawning off processes. | |
| 168 def mock_start_pipeline(self, **kwargs): | |
| 169 logging.info('Mock running on queue %s', kwargs['queue_name']) | |
| 170 analysis_pipeline = crash_pipeline.CrashAnalysisPipeline( | |
| 171 self._client_id, self._crash_identifiers) | |
| 172 analysis_pipeline.run() | |
| 173 analysis_pipeline.finalized() | |
| 174 | |
| 175 testcase.mock(ndb.Key, 'urlsafe', lambda _self: MOCK_KEY) | |
| 176 publish_pipeline = crash_pipeline.PublishResultPipeline( | |
| 177 self._client_id, self._crash_identifiers) | |
| 178 publish_pipeline.run() | |
| 179 publish_pipeline.finalized() | |
| 180 self.mock(crash_pipeline.CrashWrapperPipeline, 'start', mock_start_pipeline) | |
| 181 | |
| 182 # Mock out FindCulprit to track the number of times it's called and | |
| 183 # with which arguments. N.B., the pipeline will reconstruct Findit | |
| 184 # objects from their client_id, so we can't mock via subclassing, | |
| 185 # we must mock via ``self.mock``. | |
| 186 mock_culprit = MockCulprit(analysis_result, analysis_tags) | |
| 187 analyzed_crashes = [] | |
| 188 def _MockFindCulprit(_self, model): | |
| 189 analyzed_crashes.append(model) | |
| 190 return mock_culprit | |
| 191 self.mock(FinditForFracas, 'FindCulprit', _MockFindCulprit) | |
| 192 | |
| 193 # The real ``ParseStacktrace`` calls ``GetChromeDependency``, | |
| 194 # which eventually calls ``GitRepository.GetSource`` and hence | |
| 195 # goes over the wire. Since we mocked out ``FindCulprit`` to no | |
| 196 # longer call ``ParseStacktrace``, it shouldn't matter what the real | |
| 197 # ``ParseStacktrace`` does. However, since mocking is fragile and it's | |
| 198 # hard to triage what actually went wrong if we do end up going over | |
| 199 # the wire, we mock this out too just to be safe. | |
| 200 def _MockParseStacktrace(_self, _model): | |
| 201 raise AssertionError("ParseStacktrace shouldn't ever be called. " | |
| 202 'That it was indicates some sort of problem with our mocking code.') | |
| 203 self.mock(FinditForFracas, 'ParseStacktrace', _MockParseStacktrace) | |
| 204 | |
| 205 # More directly address the issue about ``GetChromeDependency`` going | |
| 206 # over the wire. | |
| 207 def _MockGetChromeDependency(_self, _revision, _platform): | |
| 208 raise AssertionError("GetChromeDependency shouldn't ever be called. " | |
| 209 'That it was indicates some sort of problem with our mocking code.') | |
| 210 self.mock(chrome_dependency_fetcher.ChromeDependencyFetcher, | |
| 211 'GetDependency', _MockGetChromeDependency) | |
| 212 | |
| 213 crash_data = self.GetDummyCrashData( | |
| 214 client_id = CrashClient.FRACAS, | |
| 215 version = '50.2500.0.1', | |
| 216 stack_trace = 'frame1\nframe2\nframe3') | |
| 217 self.assertTrue(crash_handler.ScheduleNewAnalysis(crash_data)) | |
| 218 | |
| 219 # The catch/re-raise is to clean up the callstack that's reported | |
| 220 # when things accidentally go over the wire (and subsequently fail). | |
| 221 try: | |
| 222 self.execute_queued_tasks() | |
| 223 except AppError, e: # pragma: no cover | |
| 224 raise e | |
| 225 | |
| 226 self.assertEqual(1, len(pubsub_publish_requests)) | |
| 227 | |
| 228 processed_analysis_result = copy.deepcopy(analysis_result) | |
| 229 processed_analysis_result['feedback_url'] = ( | |
| 230 'https://%s/crash/fracas-result-feedback?key=%s' % (MOCK_HOST, | |
| 231 MOCK_KEY)) | |
| 232 | |
| 233 for cl in processed_analysis_result.get('suspected_cls', []): | |
| 234 cl['confidence'] = round(cl['confidence'], 2) | |
| 235 cl.pop('reasons', None) | |
| 236 | |
| 237 expected_messages_data = [json.dumps({ | |
| 238 'crash_identifiers': crash_data['crash_identifiers'], | |
| 239 'client_id': CrashClient.FRACAS, | |
| 240 'result': processed_analysis_result, | |
| 241 }, sort_keys=True)] | |
| 242 self.assertListEqual(expected_messages_data, pubsub_publish_requests[0][0]) | |
| 243 self.assertEqual(1, len(analyzed_crashes)) | |
| 244 analysis = analyzed_crashes[0] | |
| 245 self.assertTrue(isinstance(analysis, FracasCrashAnalysis)) | |
| 246 self.assertEqual(crash_data['signature'], analysis.signature) | |
| 247 self.assertEqual(crash_data['platform'], analysis.platform) | |
| 248 self.assertEqual(crash_data['stack_trace'], analysis.stack_trace) | |
| 249 self.assertEqual(crash_data['chrome_version'], analysis.crashed_version) | |
| 250 self.assertEqual(crash_data['regression_range'], analysis.regression_range) | |
| 251 | |
| 252 analysis = FracasCrashAnalysis.Get(crash_data['crash_identifiers']) | |
| 253 self.assertEqual(analysis_result, analysis.result) | |
| 254 return analysis | |
| 255 | |
| 256 def testRunningAnalysis(self): | |
| 257 analysis_result = { | |
| 258 'found': True, | |
| 259 'suspected_cls': [], | |
| 260 'other_data': 'data', | |
| 261 } | |
| 262 analysis_tags = { | |
| 263 'found_suspects': True, | |
| 264 'has_regression_range': True, | |
| 265 'solution': 'core', | |
| 266 'unsupported_tag': '', | |
| 267 } | |
| 268 | |
| 269 analysis = self._TestRunningAnalysisForResult( | |
| 270 analysis_result, analysis_tags) | |
| 271 self.assertTrue(analysis.has_regression_range) | |
| 272 self.assertTrue(analysis.found_suspects) | |
| 273 self.assertEqual('core', analysis.solution) | |
| 274 | |
| 275 def testRunningAnalysisNoSuspectsFound(self): | |
| 276 analysis_result = { | |
| 277 'found': False | |
| 278 } | |
| 279 analysis_tags = { | |
| 280 'found_suspects': False, | |
| 281 'has_regression_range': False, | |
| 282 'solution': 'core', | |
| 283 'unsupported_tag': '', | |
| 284 } | |
| 285 | |
| 286 analysis = self._TestRunningAnalysisForResult( | |
| 287 analysis_result, analysis_tags) | |
| 288 self.assertFalse(analysis.has_regression_range) | |
| 289 self.assertFalse(analysis.found_suspects) | |
| 290 self.assertEqual('core', analysis.solution) | |
| 291 | |
| 292 def testRunningAnalysisWithSuspectsCls(self): | |
| 293 analysis_result = { | |
| 294 'found': True, | |
| 295 'suspected_cls': [ | |
| 296 {'confidence': 0.21434, | |
| 297 'reasons': ['reason1', 'reason2'], | |
| 298 'other': 'data'} | |
| 299 ], | |
| 300 'other_data': 'data', | |
| 301 } | |
| 302 analysis_tags = { | |
| 303 'found_suspects': True, | |
| 304 'has_regression_range': True, | |
| 305 'solution': 'core', | |
| 306 'unsupported_tag': '', | |
| 307 } | |
| 308 | |
| 309 analysis = self._TestRunningAnalysisForResult( | |
| 310 analysis_result, analysis_tags) | |
| 311 self.assertTrue(analysis.has_regression_range) | |
| 312 self.assertTrue(analysis.found_suspects) | |
| 313 self.assertEqual('core', analysis.solution) | |
| OLD | NEW |