Chromium Code Reviews
chromiumcodereview-hr@appspot.gserviceaccount.com (chromiumcodereview-hr) | Please choose your nickname with Settings | Help | Chromium Project | Gerrit Changes | Sign out
(429)

Side by Side Diff: appengine/findit/crash/test/crash_pipeline_test.py

Issue 2414523002: [Findit] Reorganizing findit_for_*.py (Closed)
Patch Set: Addressing the crash_config.fracas issue Created 4 years, 1 month ago
Use n/p to move between diff chunks; N/P to move between comments. Draft comments are only viewable by you.
Jump to:
View unified diff | Download patch
OLDNEW
1 # Copyright 2016 The Chromium Authors. All rights reserved. 1 # Copyright 2016 The Chromium Authors. All rights reserved.
2 # Use of this source code is governed by a BSD-style license that can be 2 # Use of this source code is governed by a BSD-style license that can be
3 # found in the LICENSE file. 3 # found in the LICENSE file.
4 import copy 4 import copy
5 import json 5 import json
6 import logging
6 7
7 from google.appengine.api import app_identity 8 from google.appengine.api import app_identity
9 from webtest.app import AppError
8 10
11 from common import chromium_deps
9 from common.pipeline_wrapper import pipeline_handlers 12 from common.pipeline_wrapper import pipeline_handlers
10 from crash import crash_pipeline 13 from crash import crash_pipeline
11 from crash import findit_for_chromecrash 14 from crash.culprit import Culprit
15 from crash.findit_for_chromecrash import FinditForFracas
16 from crash.type_enums import CrashClient
12 from crash.test.crash_testcase import CrashTestCase 17 from crash.test.crash_testcase import CrashTestCase
13 from model import analysis_status 18 from model import analysis_status
14 from model.crash.fracas_crash_analysis import FracasCrashAnalysis 19 from model.crash.fracas_crash_analysis import FracasCrashAnalysis
15 20
16 21
def DummyCrashData(
    version='1',
    signature='signature',
    platform='win',
    stack_trace=None,
    regression_range=None,
    channel='canary',
    historical_metadata=None,
    crash_identifiers=True,
    process_type='browser'):
  """Builds a canned crash-data dict for tests.

  By default (|crash_identifiers| is the sentinel True) a matching
  crash_identifiers dict is synthesized from the other arguments; any
  other value (e.g. None, or an explicit dict) is passed through as-is.
  """
  if crash_identifiers is True:
    # Synthesize identifiers that agree with the rest of the crash data.
    crash_identifiers = dict(
        chrome_version=version,
        signature=signature,
        channel=channel,
        platform=platform,
        process_type=process_type)
  crash_data = dict(
      crashed_version=version,
      signature=signature,
      platform=platform,
      stack_trace=stack_trace,
      regression_range=regression_range,
      crash_identifiers=crash_identifiers)
  crash_data['customized_data'] = dict(
      historical_metadata=historical_metadata,
      channel=channel)
  return crash_data
52
53
class MockCulprit(object):
  """Fake culprit whose |ToDicts| yields a preset (result, tags) pair."""

  def __init__(self, mock_result, mock_tags):
    # Store the canned payload as a single pair, ready to hand back.
    self._dicts = (mock_result, mock_tags)

  def ToDicts(self):  # pragma: no cover
    return self._dicts
64
17 class CrashPipelineTest(CrashTestCase): 65 class CrashPipelineTest(CrashTestCase):
18 app_module = pipeline_handlers._APP 66 app_module = pipeline_handlers._APP
19 67
20 def testNoAnalysisIfLastOneIsNotFailed(self): 68 def testAnalysisAborted(self):
21 chrome_version = '1' 69 crash_identifiers = DummyCrashData()['crash_identifiers']
22 signature = 'signature'
23 platform = 'win'
24 crash_identifiers = {
25 'chrome_version': chrome_version,
26 'signature': signature,
27 'channel': 'canary',
28 'platform': platform,
29 'process_type': 'browser',
30 }
31 for status in (analysis_status.PENDING, analysis_status.RUNNING,
32 analysis_status.COMPLETED, analysis_status.SKIPPED):
33 analysis = FracasCrashAnalysis.Create(crash_identifiers)
34 analysis.status = status
35 analysis.put()
36 self.assertFalse(crash_pipeline._NeedsNewAnalysis(
37 crash_identifiers, chrome_version, signature, 'fracas',
38 platform, None, {'channel': 'canary'}))
39
40 def testAnalysisNeededIfLastOneFailed(self):
41 chrome_version = '1'
42 signature = 'signature'
43 platform = 'win'
44 crash_identifiers = {
45 'chrome_version': chrome_version,
46 'signature': signature,
47 'channel': 'canary',
48 'platform': platform,
49 'process_type': 'browser',
50 }
51 analysis = FracasCrashAnalysis.Create(crash_identifiers) 70 analysis = FracasCrashAnalysis.Create(crash_identifiers)
52 analysis.status = analysis_status.ERROR 71 analysis.status = analysis_status.RUNNING
53 analysis.put()
54 self.assertTrue(crash_pipeline._NeedsNewAnalysis(
55 crash_identifiers, chrome_version, signature, 'fracas',
56 platform, None, {'channel': 'canary'}))
57
58 def testAnalysisNeededIfNoAnalysisYet(self):
59 chrome_version = '1'
60 signature = 'signature'
61 platform = 'win'
62 crash_identifiers = {
63 'chrome_version': chrome_version,
64 'signature': signature,
65 'channel': 'canary',
66 'platform': platform,
67 'process_type': 'browser',
68 }
69 self.assertTrue(crash_pipeline._NeedsNewAnalysis(
70 crash_identifiers, chrome_version, signature, 'fracas',
71 platform, None, {'channel': 'canary'}))
72
73 def testUnsupportedChannelOrPlatformSkipped(self):
74 self.assertFalse(
75 crash_pipeline.ScheduleNewAnalysisForCrash(
76 {}, None, None, 'fracas', 'win',
77 None, {'channel': 'unsupported_channel',
78 'historical_metadata': None}))
79 self.assertFalse(
80 crash_pipeline.ScheduleNewAnalysisForCrash(
81 {}, None, None, 'fracas', 'unsupported_platform',
82 None, {'channel': 'unsupported_channel',
83 'historical_metadata': None}))
84
85 def testBlackListSignatureSipped(self):
86 self.assertFalse(
87 crash_pipeline.ScheduleNewAnalysisForCrash(
88 {}, None, 'Blacklist marker signature', 'fracas', 'win',
89 None, {'channel': 'canary',
90 'historical_metadata': None}))
91
92 def testPlatformRename(self):
93 def _MockNeedsNewAnalysis(*args):
94 self.assertEqual(args,
95 ({}, None, 'signature', 'fracas', 'unix', None,
96 {'channel': 'canary'}))
97 return False
98
99 self.mock(crash_pipeline, '_NeedsNewAnalysis', _MockNeedsNewAnalysis)
100
101 crash_pipeline.ScheduleNewAnalysisForCrash(
102 {}, None, 'signature', 'fracas', 'linux',
103 None, {'channel': 'canary'})
104
105 def testNoAnalysisNeeded(self):
106 chrome_version = '1'
107 signature = 'signature'
108 platform = 'win'
109 channel = 'canary'
110 crash_identifiers = {
111 'chrome_version': chrome_version,
112 'signature': signature,
113 'channel': channel,
114 'platform': platform,
115 'process_type': 'browser',
116 }
117 analysis = FracasCrashAnalysis.Create(crash_identifiers)
118 analysis.status = analysis_status.COMPLETED
119 analysis.put() 72 analysis.put()
120 73
121 self.assertFalse( 74 pipeline = crash_pipeline.CrashAnalysisPipeline(
122 crash_pipeline.ScheduleNewAnalysisForCrash( 75 CrashClient.FRACAS,
123 crash_identifiers, chrome_version, signature, 'fracas', 76 crash_identifiers)
124 platform, None, {'channel': channel, 77 pipeline._PutAbortedError()
125 'historical_metadata': None})) 78 analysis = FracasCrashAnalysis.Get(crash_identifiers)
79 self.assertEqual(analysis_status.ERROR, analysis.status)
126 80
81
82 # TODO: this function is a gross hack. We should figure out what the
83 # semantic goal really is here, so we can avoid doing such intricate
84 # and fragile mocking.
127 def _TestRunningAnalysisForResult(self, analysis_result, analysis_tags): 85 def _TestRunningAnalysisForResult(self, analysis_result, analysis_tags):
86
87 # Mock out the part of PublishResultPipeline that would go over the wire.
128 pubsub_publish_requests = [] 88 pubsub_publish_requests = []
129 def Mocked_PublishMessagesToTopic(messages_data, topic): 89 def Mocked_PublishMessagesToTopic(messages_data, topic):
130 pubsub_publish_requests.append((messages_data, topic)) 90 pubsub_publish_requests.append((messages_data, topic))
131 self.mock(crash_pipeline.pubsub_util, 'PublishMessagesToTopic', 91 self.mock(crash_pipeline.pubsub_util, 'PublishMessagesToTopic',
132 Mocked_PublishMessagesToTopic) 92 Mocked_PublishMessagesToTopic)
133 93
134 analyzed_crashes = []
135 class Mocked_FinditForChromeCrash(object):
136 def __init__(self, *_):
137 pass
138 def FindCulprit(self, *args):
139 analyzed_crashes.append(args)
140 return analysis_result, analysis_tags
141 self.mock(findit_for_chromecrash, 'FinditForChromeCrash',
142 Mocked_FinditForChromeCrash)
143
144 chrome_version = '1'
145 signature = 'signature'
146 platform = 'win'
147 channel = 'canary'
148 crash_identifiers = {
149 'chrome_version': chrome_version,
150 'signature': signature,
151 'channel': channel,
152 'platform': platform,
153 'process_type': 'browser',
154 }
155 stack_trace = 'frame1\nframe2\nframe3'
156 chrome_version = '50.2500.0.1'
157 historical_metadata = None
158
159 mock_host = 'https://host.com' 94 mock_host = 'https://host.com'
160 self.mock(app_identity, 'get_default_version_hostname', lambda: mock_host) 95 self.mock(app_identity, 'get_default_version_hostname', lambda: mock_host)
161 96
162 self.assertTrue( 97 # TODO: We need to mock out the pipeline so that it doesn't go over
163 crash_pipeline.ScheduleNewAnalysisForCrash( 98 # the wire, and yet still exercises the code we're trying to unittest.
164 crash_identifiers, chrome_version, signature, 'fracas', 99 # TODO: since |FinditForClientID| automatically feeds
165 platform, stack_trace, 100 # CrashWrapperPipeline in to the Findit constructor; this mock
166 {'channel': channel, 'historical_metadata': historical_metadata})) 101 # probably won't work.
102 class _MockPipeline(crash_pipeline.CrashWrapperPipeline):
103 def start(self, **kwargs):
104 logging.info('Mock running on queue %s', kwargs['queue_name'])
105 # TODO: the code below helps to improve the code coverage
106 # percentage, but it leads ot even more verbose failures for
107 # everything that calls _TestRunningAnalysisForResult. If we're
108 # mocking things this way, we should figure out how to do the
109 # callback loop with what |CrashWrapperPipeline.run| yields.
110 #analysis_pipeline = crash_pipeline.CrashAnalysisPipeline(
111 # self._client_id, self._crash_identifiers)
112 #analysis_pipeline.run()
113 #analysis_pipeline.finalized()
114 #publish_pipeline = crash_pipeline.PublishResultPipeline(
115 # self._client_id, self._crash_identifiers)
116 #publish_pipeline.run()
117 #publish_pipeline.finalized()
118 raise NotImplementedError(
119 'how can we mock CrashWrapperPipeline correctly?')
167 120
168 self.execute_queued_tasks() 121 # Mock out FindCulprit to track the number of times it's called and
122 # with which arguments. N.B., the pipeline will reconstruct Findit
123 # objects from their client_id, so we can't mock via subclassing,
124 # we must mock via |self.mock|.
125 mock_culprit = MockCulprit(analysis_result, analysis_tags)
126 analyzed_crashes = []
127 def _MockFindCulprit(_self, *args):
128 analyzed_crashes.append(args)
129 return mock_culprit
130 self.mock(FinditForFracas, 'FindCulprit', _MockFindCulprit)
131
132 # The real |ParseStacktrace| calls |GetChromeDependency|, which
133 # eventually calls |GitRepository.GetSource| and hence goes over
134 # the wire. Since we mocked out |FindCulprit| to no longer call
135 # |ParseStacktrace|, it shouldn't matter what the real
136 # |ParseStacktrace| does. However, since mocking is fragile and it's
137 # hard to triage what actually went wrong if we do end up going over
138 # the wire, we mock this out too just to be safe.
139 def _MockParseStacktrace(_self, _model):
140 raise AssertionError("ParseStacktrace shouldn't ever be called. "
141 'That it was indicates some sort of problem with our mocking code.')
142 self.mock(FinditForFracas, 'ParseStacktrace', _MockParseStacktrace)
143
144 # More directly address the issue about |GetChromeDependency| going
145 # over the wire.
146 def _MockGetChromeDependency(_self, _model):
147 raise AssertionError("GetChromeDependency shouldn't ever be called. "
148 'That it was indicates some sort of problem with our mocking code.')
149 self.mock(chromium_deps, 'GetChromeDependency', _MockGetChromeDependency)
150
151 crash_data = DummyCrashData(
152 version = '50.2500.0.1',
153 stack_trace = 'frame1\nframe2\nframe3')
154 # This call to ScheduleNewAnalysis is the one that was going over
155 # the wire previously. We don't want to go over the wire, so we need
156 # to make sure all the bits and pieces are mocked appropriately.
157 #
158 # TODO(wrengr): and in the latest incarnation of our problems here,
159 # suddenly the call to |self.config| nested deep inside here is
160 # returning None.
161 self.assertTrue(FinditForFracas(_MockPipeline).ScheduleNewAnalysis(
162 crash_data))
163
164 # We catch and re-raise to clean up the callstack that's reported.
165 try:
166 self.execute_queued_tasks()
167 except AppError, e:
168 raise e
169 169
170 self.assertEqual(1, len(pubsub_publish_requests)) 170 self.assertEqual(1, len(pubsub_publish_requests))
171 171
172 processed_analysis_result = copy.deepcopy(analysis_result) 172 processed_analysis_result = copy.deepcopy(analysis_result)
173 processed_analysis_result['feedback_url'] = ( 173 processed_analysis_result['feedback_url'] = (
174 mock_host + '/crash/fracas-result-feedback?' 174 mock_host + '/crash/fracas-result-feedback?'
175 'key=agx0ZXN0YmVkLXRlc3RyQQsSE0ZyYWNhc0NyYXNoQW5hbHlzaXMiKGU2ZWIyNj' 175 'key=agx0ZXN0YmVkLXRlc3RyQQsSE0ZyYWNhc0NyYXNoQW5hbHlzaXMiKGU2ZWIyNj'
176 'A2OTBlYTAyMjVjNWNjYTM3ZTNjYTlmYWExOGVmYjVlM2UM') 176 'A2OTBlYTAyMjVjNWNjYTM3ZTNjYTlmYWExOGVmYjVlM2UM')
177 177
178 if 'suspected_cls' in processed_analysis_result: 178 for cl in processed_analysis_result.get('suspected_cls', []):
179 for cl in processed_analysis_result['suspected_cls']: 179 cl['confidence'] = round(cl['confidence'], 2)
180 cl['confidence'] = round(cl['confidence'], 2) 180 cl.pop('reason', None)
181 cl.pop('reason', None)
182 181
183 expected_messages_data = [json.dumps({ 182 expected_messages_data = [json.dumps({
184 'crash_identifiers': crash_identifiers, 183 'crash_identifiers': crash_data['crash_identifiers'],
185 'client_id': 'fracas', 184 'client_id': CrashClient.FRACAS,
186 'result': processed_analysis_result, 185 'result': processed_analysis_result,
187 }, sort_keys=True)] 186 }, sort_keys=True)]
188 self.assertEqual(expected_messages_data, pubsub_publish_requests[0][0]) 187 self.assertEqual(expected_messages_data, pubsub_publish_requests[0][0])
189 188
190 self.assertEqual(1, len(analyzed_crashes)) 189 self.assertEqual(1, len(analyzed_crashes))
191 self.assertEqual( 190 self.assertEqual(
192 (signature, platform, stack_trace, chrome_version, None), 191 (crash_data['signature'], crash_data['platform'],
192 crash_data['stack_trace'], crash_data['chrome_version'], None),
193 analyzed_crashes[0]) 193 analyzed_crashes[0])
194 194
195 analysis = FracasCrashAnalysis.Get(crash_identifiers) 195 analysis = FracasCrashAnalysis.Get(crash_data['crash_identifiers'])
196 self.assertEqual(analysis_result, analysis.result) 196 self.assertEqual(analysis_result, analysis.result)
197 return analysis 197 return analysis
198 198
199 199
stgao 2016/10/25 18:03:41 nit: too many empty lines.
wrengr 2016/10/25 19:49:54 Acknowledged.
200
201
200 def testRunningAnalysis(self): 202 def testRunningAnalysis(self):
201 analysis_result = { 203 analysis_result = {
202 'found': True, 204 'found': True,
203 'suspected_cls': [], 205 'suspected_cls': [],
204 'other_data': 'data', 206 'other_data': 'data',
205 } 207 }
206 analysis_tags = { 208 analysis_tags = {
207 'found_suspects': True, 209 'found_suspects': True,
208 'has_regression_range': True, 210 'has_regression_range': True,
209 'solution': 'core', 211 'solution': 'core',
(...skipping 38 matching lines...) Expand 10 before | Expand all | Expand 10 after
248 'has_regression_range': True, 250 'has_regression_range': True,
249 'solution': 'core', 251 'solution': 'core',
250 'unsupported_tag': '', 252 'unsupported_tag': '',
251 } 253 }
252 254
253 analysis = self._TestRunningAnalysisForResult( 255 analysis = self._TestRunningAnalysisForResult(
254 analysis_result, analysis_tags) 256 analysis_result, analysis_tags)
255 self.assertTrue(analysis.has_regression_range) 257 self.assertTrue(analysis.has_regression_range)
256 self.assertTrue(analysis.found_suspects) 258 self.assertTrue(analysis.found_suspects)
257 self.assertEqual('core', analysis.solution) 259 self.assertEqual('core', analysis.solution)
258
259 def testAnalysisAborted(self):
260 chrome_version = '1'
261 signature = 'signature'
262 platform = 'win'
263 crash_identifiers = {
264 'chrome_version': chrome_version,
265 'signature': signature,
266 'channel': 'canary',
267 'platform': platform,
268 'process_type': 'browser',
269 }
270 analysis = FracasCrashAnalysis.Create(crash_identifiers)
271 analysis.status = analysis_status.RUNNING
272 analysis.put()
273
274 pipeline = crash_pipeline.CrashAnalysisPipeline(crash_identifiers, 'fracas')
275 pipeline._SetErrorIfAborted(True)
276 analysis = FracasCrashAnalysis.Get(crash_identifiers)
277 self.assertEqual(analysis_status.ERROR, analysis.status)
OLDNEW

Powered by Google App Engine
This is Rietveld 408576698