Chromium Code Reviews
chromiumcodereview-hr@appspot.gserviceaccount.com (chromiumcodereview-hr) | Please choose your nickname with Settings | Help | Chromium Project | Gerrit Changes | Sign out
(18)

Side by Side Diff: appengine/findit/crash/test/crash_pipeline_test.py

Issue 2414523002: [Findit] Reorganizing findit_for_*.py (Closed)
Patch Set: rebasing against recently landed cls Created 4 years, 1 month ago
Use n/p to move between diff chunks; N/P to move between comments. Draft comments are only viewable by you.
Jump to:
View unified diff | Download patch
OLDNEW
1 # Copyright 2016 The Chromium Authors. All rights reserved. 1 # Copyright 2016 The Chromium Authors. All rights reserved.
2 # Use of this source code is governed by a BSD-style license that can be 2 # Use of this source code is governed by a BSD-style license that can be
3 # found in the LICENSE file. 3 # found in the LICENSE file.
4 import copy 4 import copy
5 import json 5 import json
6 import logging
6 7
7 from google.appengine.api import app_identity 8 from google.appengine.api import app_identity
9 from webtest.app import AppError
8 10
11 from common import chrome_dependency_fetcher
9 from common.pipeline_wrapper import pipeline_handlers 12 from common.pipeline_wrapper import pipeline_handlers
10 from crash import crash_pipeline 13 from crash import crash_pipeline
11 from crash import findit_for_chromecrash 14 from crash.culprit import Culprit
15 from crash.findit_for_chromecrash import FinditForFracas
16 from crash.type_enums import CrashClient
12 from crash.test.crash_testcase import CrashTestCase 17 from crash.test.crash_testcase import CrashTestCase
13 from model import analysis_status 18 from model import analysis_status
14 from model.crash.fracas_crash_analysis import FracasCrashAnalysis 19 from model.crash.fracas_crash_analysis import FracasCrashAnalysis
15 20
def DummyCrashData(
    version='1',
    signature='signature',
    platform='win',
    stack_trace=None,
    regression_range=None,
    channel='canary',
    historical_metadata=None,
    crash_identifiers=True,
    process_type='browser'):
  """Build a canned crash-data dict for tests.

  When |crash_identifiers| is left at its sentinel value True, a default
  identifiers dict is synthesized from the other arguments; any other value
  (including None or False) is passed through unchanged.
  """
  if crash_identifiers is True:
    # Synthesize default identifiers from the individual fields.
    crash_identifiers = {
        'chrome_version': version,
        'signature': signature,
        'channel': channel,
        'platform': platform,
        'process_type': process_type,
    }
  customized_data = {
      'historical_metadata': historical_metadata,
      'channel': channel,
  }
  crash_data = {
      'crashed_version': version,
      'signature': signature,
      'platform': platform,
      'stack_trace': stack_trace,
      'regression_range': regression_range,
      'crash_identifiers': crash_identifiers,
      'customized_data': customized_data,
  }
  return crash_data
51
52
class MockCulprit(object):
  """Fake culprit whose |ToDicts| yields pre-canned result/tag dicts."""

  def __init__(self, mock_result, mock_tags):
    # Store both canned values as a single pair, returned verbatim.
    self._canned_pair = (mock_result, mock_tags)

  def ToDicts(self):  # pragma: no cover
    """Return the (result, tags) pair supplied at construction."""
    return self._canned_pair
62
16 63
17 class CrashPipelineTest(CrashTestCase): 64 class CrashPipelineTest(CrashTestCase):
18 app_module = pipeline_handlers._APP 65 app_module = pipeline_handlers._APP
19 66
20 def testNoAnalysisIfLastOneIsNotFailed(self): 67 def testAnalysisAborted(self):
21 chrome_version = '1' 68 crash_identifiers = DummyCrashData()['crash_identifiers']
22 signature = 'signature'
23 platform = 'win'
24 crash_identifiers = {
25 'chrome_version': chrome_version,
26 'signature': signature,
27 'channel': 'canary',
28 'platform': platform,
29 'process_type': 'browser',
30 }
31 for status in (analysis_status.PENDING, analysis_status.RUNNING,
32 analysis_status.COMPLETED, analysis_status.SKIPPED):
33 analysis = FracasCrashAnalysis.Create(crash_identifiers)
34 analysis.status = status
35 analysis.put()
36 self.assertFalse(crash_pipeline._NeedsNewAnalysis(
37 crash_identifiers, chrome_version, signature, 'fracas',
38 platform, None, {'channel': 'canary'}))
39
40 def testAnalysisNeededIfLastOneFailed(self):
41 chrome_version = '1'
42 signature = 'signature'
43 platform = 'win'
44 crash_identifiers = {
45 'chrome_version': chrome_version,
46 'signature': signature,
47 'channel': 'canary',
48 'platform': platform,
49 'process_type': 'browser',
50 }
51 analysis = FracasCrashAnalysis.Create(crash_identifiers) 69 analysis = FracasCrashAnalysis.Create(crash_identifiers)
52 analysis.status = analysis_status.ERROR 70 analysis.status = analysis_status.RUNNING
53 analysis.put()
54 self.assertTrue(crash_pipeline._NeedsNewAnalysis(
55 crash_identifiers, chrome_version, signature, 'fracas',
56 platform, None, {'channel': 'canary'}))
57
58 def testAnalysisNeededIfNoAnalysisYet(self):
59 chrome_version = '1'
60 signature = 'signature'
61 platform = 'win'
62 crash_identifiers = {
63 'chrome_version': chrome_version,
64 'signature': signature,
65 'channel': 'canary',
66 'platform': platform,
67 'process_type': 'browser',
68 }
69 self.assertTrue(crash_pipeline._NeedsNewAnalysis(
70 crash_identifiers, chrome_version, signature, 'fracas',
71 platform, None, {'channel': 'canary'}))
72
73 def testUnsupportedChannelOrPlatformSkipped(self):
74 self.assertFalse(
75 crash_pipeline.ScheduleNewAnalysisForCrash(
76 {}, None, None, 'fracas', 'win',
77 None, {'channel': 'unsupported_channel',
78 'historical_metadata': None}))
79 self.assertFalse(
80 crash_pipeline.ScheduleNewAnalysisForCrash(
81 {}, None, None, 'fracas', 'unsupported_platform',
82 None, {'channel': 'unsupported_channel',
83 'historical_metadata': None}))
84
85 def testBlackListSignatureSipped(self):
86 self.assertFalse(
87 crash_pipeline.ScheduleNewAnalysisForCrash(
88 {}, None, 'Blacklist marker signature', 'fracas', 'win',
89 None, {'channel': 'canary',
90 'historical_metadata': None}))
91
92 def testPlatformRename(self):
93 def _MockNeedsNewAnalysis(*args):
94 self.assertEqual(args,
95 ({}, None, 'signature', 'fracas', 'unix', None,
96 {'channel': 'canary'}))
97 return False
98
99 self.mock(crash_pipeline, '_NeedsNewAnalysis', _MockNeedsNewAnalysis)
100
101 crash_pipeline.ScheduleNewAnalysisForCrash(
102 {}, None, 'signature', 'fracas', 'linux',
103 None, {'channel': 'canary'})
104
105 def testNoAnalysisNeeded(self):
106 chrome_version = '1'
107 signature = 'signature'
108 platform = 'win'
109 channel = 'canary'
110 crash_identifiers = {
111 'chrome_version': chrome_version,
112 'signature': signature,
113 'channel': channel,
114 'platform': platform,
115 'process_type': 'browser',
116 }
117 analysis = FracasCrashAnalysis.Create(crash_identifiers)
118 analysis.status = analysis_status.COMPLETED
119 analysis.put() 71 analysis.put()
120 72
121 self.assertFalse( 73 pipeline = crash_pipeline.CrashAnalysisPipeline(
122 crash_pipeline.ScheduleNewAnalysisForCrash( 74 CrashClient.FRACAS,
123 crash_identifiers, chrome_version, signature, 'fracas', 75 crash_identifiers)
124 platform, None, {'channel': channel, 76 pipeline._PutAbortedError()
125 'historical_metadata': None})) 77 analysis = FracasCrashAnalysis.Get(crash_identifiers)
78 self.assertEqual(analysis_status.ERROR, analysis.status)
126 79
80
81 # TODO: this function is a gross hack. We should figure out what the
82 # semantic goal really is here, so we can avoid doing such intricate
83 # and fragile mocking.
127 def _TestRunningAnalysisForResult(self, analysis_result, analysis_tags): 84 def _TestRunningAnalysisForResult(self, analysis_result, analysis_tags):
85
86 # Mock out the part of PublishResultPipeline that would go over the wire.
128 pubsub_publish_requests = [] 87 pubsub_publish_requests = []
129 def Mocked_PublishMessagesToTopic(messages_data, topic): 88 def Mocked_PublishMessagesToTopic(messages_data, topic):
130 pubsub_publish_requests.append((messages_data, topic)) 89 pubsub_publish_requests.append((messages_data, topic))
131 self.mock(crash_pipeline.pubsub_util, 'PublishMessagesToTopic', 90 self.mock(crash_pipeline.pubsub_util, 'PublishMessagesToTopic',
132 Mocked_PublishMessagesToTopic) 91 Mocked_PublishMessagesToTopic)
133 92
134 analyzed_crashes = []
135 class Mocked_FinditForChromeCrash(object):
136 def __init__(self, *_):
137 pass
138 def FindCulprit(self, *args):
139 analyzed_crashes.append(args)
140 return analysis_result, analysis_tags
141 self.mock(findit_for_chromecrash, 'FinditForChromeCrash',
142 Mocked_FinditForChromeCrash)
143
144 chrome_version = '1'
145 signature = 'signature'
146 platform = 'win'
147 channel = 'canary'
148 crash_identifiers = {
149 'chrome_version': chrome_version,
150 'signature': signature,
151 'channel': channel,
152 'platform': platform,
153 'process_type': 'browser',
154 }
155 stack_trace = 'frame1\nframe2\nframe3'
156 chrome_version = '50.2500.0.1'
157 historical_metadata = None
158
159 mock_host = 'https://host.com' 93 mock_host = 'https://host.com'
160 self.mock(app_identity, 'get_default_version_hostname', lambda: mock_host) 94 self.mock(app_identity, 'get_default_version_hostname', lambda: mock_host)
161 95
96 # TODO: We need to mock out the pipeline so that it doesn't go over
97 # the wire, and yet still exercises the code we're trying to unittest.
98 # TODO: since |FinditForClientID| automatically feeds
99 # CrashWrapperPipeline in to the Findit constructor; this mock
100 # probably won't work.
101 class _MockPipeline(crash_pipeline.CrashWrapperPipeline):
102 def start(self, **kwargs):
103 logging.info('Mock running on queue %s', kwargs['queue_name'])
104 # TODO: the code below helps to improve the code coverage
105 # percentage, but it leads to even more verbose failures for
106 # everything that calls _TestRunningAnalysisForResult. If we're
107 # mocking things this way, we should figure out how to do the
108 # callback loop with what |CrashWrapperPipeline.run| yields.
109 #analysis_pipeline = crash_pipeline.CrashAnalysisPipeline(
110 # self._client_id, self._crash_identifiers)
111 #analysis_pipeline.run()
Sharu Jiang 2016/10/26 01:16:00 the self._client_id, self._crash_identifiers shoul
wrengr 2016/10/26 17:05:19 No, the new run methods don't take any arguments.
Sharu Jiang 2016/10/26 19:09:32 Acknowledged.
112 #analysis_pipeline.finalized()
113 #publish_pipeline = crash_pipeline.PublishResultPipeline(
114 # self._client_id, self._crash_identifiers)
115 #publish_pipeline.run()
Sharu Jiang 2016/10/26 01:16:00 Ditto.
116 #publish_pipeline.finalized()
117 raise NotImplementedError(
118 'how can we mock CrashWrapperPipeline correctly?')
119
120 # Mock out FindCulprit to track the number of times it's called and
121 # with which arguments. N.B., the pipeline will reconstruct Findit
122 # objects from their client_id, so we can't mock via subclassing,
123 # we must mock via |self.mock|.
124 mock_culprit = MockCulprit(analysis_result, analysis_tags)
125 analyzed_crashes = []
126 def _MockFindCulprit(_self, *args):
127 analyzed_crashes.append(args)
128 return mock_culprit
129 self.mock(FinditForFracas, 'FindCulprit', _MockFindCulprit)
130
131 # The real |ParseStacktrace| calls |GetChromeDependency|, which
132 # eventually calls |GitRepository.GetSource| and hence goes over
133 # the wire. Since we mocked out |FindCulprit| to no longer call
134 # |ParseStacktrace|, it shouldn't matter what the real
135 # |ParseStacktrace| does. However, since mocking is fragile and it's
136 # hard to triage what actually went wrong if we do end up going over
137 # the wire, we mock this out too just to be safe.
138 def _MockParseStacktrace(_self, _model):
139 raise AssertionError("ParseStacktrace shouldn't ever be called. "
140 'That it was indicates some sort of problem with our mocking code.')
141 self.mock(FinditForFracas, 'ParseStacktrace', _MockParseStacktrace)
142
143 # More directly address the issue about |GetChromeDependency| going
144 # over the wire.
145 def _MockGetChromeDependency(_self, _revision, _platform):
146 raise AssertionError("GetChromeDependency shouldn't ever be called. "
147 'That it was indicates some sort of problem with our mocking code.')
148 self.mock(chrome_dependency_fetcher.ChromeDependencyFetcher,
149 'GetDependency', _MockGetChromeDependency)
150
151 crash_data = DummyCrashData(
152 version = '50.2500.0.1',
153 stack_trace = 'frame1\nframe2\nframe3')
154 # A fake repository, needed by the Findit constructor.
155 mock_repository = None
156 # This call to ScheduleNewAnalysis is the one that was going over
157 # the wire previously. We don't want to go over the wire, so we need
158 # to make sure all the bits and pieces are mocked appropriately.
159 #
160 # TODO(wrengr): and in the latest incarnation of our problems here,
161 # suddenly the call to |self.config| nested deep inside here is
162 # returning None.
162 self.assertTrue( 163 self.assertTrue(
163 crash_pipeline.ScheduleNewAnalysisForCrash( 164 FinditForFracas(mock_repository, _MockPipeline).ScheduleNewAnalysis(
164 crash_identifiers, chrome_version, signature, 'fracas', 165 crash_data))
165 platform, stack_trace,
166 {'channel': channel, 'historical_metadata': historical_metadata}))
167 166
168 self.execute_queued_tasks() 167 # We catch and re-raise to clean up the callstack that's reported.
168 try:
169 self.execute_queued_tasks()
170 except AppError, e:
171 raise e
169 172
170 self.assertEqual(1, len(pubsub_publish_requests)) 173 self.assertEqual(1, len(pubsub_publish_requests))
171 174
172 processed_analysis_result = copy.deepcopy(analysis_result) 175 processed_analysis_result = copy.deepcopy(analysis_result)
173 processed_analysis_result['feedback_url'] = ( 176 processed_analysis_result['feedback_url'] = (
174 mock_host + '/crash/fracas-result-feedback?' 177 mock_host + '/crash/fracas-result-feedback?'
175 'key=agx0ZXN0YmVkLXRlc3RyQQsSE0ZyYWNhc0NyYXNoQW5hbHlzaXMiKGU2ZWIyNj' 178 'key=agx0ZXN0YmVkLXRlc3RyQQsSE0ZyYWNhc0NyYXNoQW5hbHlzaXMiKGU2ZWIyNj'
176 'A2OTBlYTAyMjVjNWNjYTM3ZTNjYTlmYWExOGVmYjVlM2UM') 179 'A2OTBlYTAyMjVjNWNjYTM3ZTNjYTlmYWExOGVmYjVlM2UM')
177 180
178 if 'suspected_cls' in processed_analysis_result: 181 for cl in processed_analysis_result.get('suspected_cls', []):
179 for cl in processed_analysis_result['suspected_cls']: 182 cl['confidence'] = round(cl['confidence'], 2)
180 cl['confidence'] = round(cl['confidence'], 2) 183 cl.pop('reason', None)
181 cl.pop('reason', None)
182 184
183 expected_messages_data = [json.dumps({ 185 expected_messages_data = [json.dumps({
184 'crash_identifiers': crash_identifiers, 186 'crash_identifiers': crash_data['crash_identifiers'],
185 'client_id': 'fracas', 187 'client_id': CrashClient.FRACAS,
186 'result': processed_analysis_result, 188 'result': processed_analysis_result,
187 }, sort_keys=True)] 189 }, sort_keys=True)]
188 self.assertEqual(expected_messages_data, pubsub_publish_requests[0][0]) 190 self.assertEqual(expected_messages_data, pubsub_publish_requests[0][0])
189 191
190 self.assertEqual(1, len(analyzed_crashes)) 192 self.assertEqual(1, len(analyzed_crashes))
191 self.assertEqual( 193 self.assertEqual(
192 (signature, platform, stack_trace, chrome_version, None), 194 (crash_data['signature'], crash_data['platform'],
195 crash_data['stack_trace'], crash_data['chrome_version'], None),
193 analyzed_crashes[0]) 196 analyzed_crashes[0])
194 197
195 analysis = FracasCrashAnalysis.Get(crash_identifiers) 198 analysis = FracasCrashAnalysis.Get(crash_data['crash_identifiers'])
196 self.assertEqual(analysis_result, analysis.result) 199 self.assertEqual(analysis_result, analysis.result)
197 return analysis 200 return analysis
198 201
199 202
203
204
200 def testRunningAnalysis(self): 205 def testRunningAnalysis(self):
201 analysis_result = { 206 analysis_result = {
202 'found': True, 207 'found': True,
203 'suspected_cls': [], 208 'suspected_cls': [],
204 'other_data': 'data', 209 'other_data': 'data',
205 } 210 }
206 analysis_tags = { 211 analysis_tags = {
207 'found_suspects': True, 212 'found_suspects': True,
208 'has_regression_range': True, 213 'has_regression_range': True,
209 'solution': 'core', 214 'solution': 'core',
(...skipping 38 matching lines...) Expand 10 before | Expand all | Expand 10 after
248 'has_regression_range': True, 253 'has_regression_range': True,
249 'solution': 'core', 254 'solution': 'core',
250 'unsupported_tag': '', 255 'unsupported_tag': '',
251 } 256 }
252 257
253 analysis = self._TestRunningAnalysisForResult( 258 analysis = self._TestRunningAnalysisForResult(
254 analysis_result, analysis_tags) 259 analysis_result, analysis_tags)
255 self.assertTrue(analysis.has_regression_range) 260 self.assertTrue(analysis.has_regression_range)
256 self.assertTrue(analysis.found_suspects) 261 self.assertTrue(analysis.found_suspects)
257 self.assertEqual('core', analysis.solution) 262 self.assertEqual('core', analysis.solution)
258
259 def testAnalysisAborted(self):
260 chrome_version = '1'
261 signature = 'signature'
262 platform = 'win'
263 crash_identifiers = {
264 'chrome_version': chrome_version,
265 'signature': signature,
266 'channel': 'canary',
267 'platform': platform,
268 'process_type': 'browser',
269 }
270 analysis = FracasCrashAnalysis.Create(crash_identifiers)
271 analysis.status = analysis_status.RUNNING
272 analysis.put()
273
274 pipeline = crash_pipeline.CrashAnalysisPipeline(crash_identifiers, 'fracas')
275 pipeline._SetErrorIfAborted(True)
276 analysis = FracasCrashAnalysis.Get(crash_identifiers)
277 self.assertEqual(analysis_status.ERROR, analysis.status)
OLDNEW

Powered by Google App Engine
This is Rietveld 408576698