Chromium Code Reviews
chromiumcodereview-hr@appspot.gserviceaccount.com (chromiumcodereview-hr) | Please choose your nickname with Settings | Help | Chromium Project | Gerrit Changes | Sign out
(140)

Side by Side Diff: appengine/findit/crash/crash_pipeline.py

Issue 2299883005: [Findit] Add findit_for_client to do analysis based on client_id (Closed)
Patch Set: Address comments and rename findit_for_fracas to findit_for_chromecrash Created 4 years, 3 months ago
Use n/p to move between diff chunks; N/P to move between comments. Draft comments are only viewable by you.
Jump to:
View unified diff | Download patch
OLDNEW
1 # Copyright 2016 The Chromium Authors. All rights reserved. 1 # Copyright 2016 The Chromium Authors. All rights reserved.
2 # Use of this source code is governed by a BSD-style license that can be 2 # Use of this source code is governed by a BSD-style license that can be
3 # found in the LICENSE file. 3 # found in the LICENSE file.
4 4
5 import copy 5 import copy
6 import json 6 import json
7 import logging 7 import logging
8 8
9 from google.appengine.api import app_identity 9 from google.appengine.api import app_identity
10 from google.appengine.ext import ndb 10 from google.appengine.ext import ndb
11 11
12 from common import appengine_util 12 from common import appengine_util
13 from common import constants 13 from common import constants
14 from common import pubsub_util 14 from common import pubsub_util
15 from common import time_util 15 from common import time_util
16 from common.pipeline_wrapper import BasePipeline 16 from common.pipeline_wrapper import BasePipeline
17 from common.pipeline_wrapper import pipeline 17 from common.pipeline_wrapper import pipeline
18 from crash import findit_for_fracas 18 from crash import findit_for_client
19 from model import analysis_status 19 from model import analysis_status
20 from model.crash.crash_config import CrashConfig 20 from model.crash.crash_config import CrashConfig
21 from model.crash.fracas_crash_analysis import FracasCrashAnalysis
22
23 _FINDIT_FEEDBACK_URL_TEMPLATE = '%s/crash/fracas-result-feedback?key=%s'
24 21
25 22
class CrashBasePipeline(BasePipeline):
  """Base class for crash pipelines.

  Stores the crash identifiers and the client id so that both the analysis
  and the publishing pipelines can locate the same analysis entity.
  """

  def __init__(self, crash_identifiers, client_id):
    super(CrashBasePipeline, self).__init__(crash_identifiers, client_id)
    self.crash_identifiers = crash_identifiers
    self.client_id = client_id

  def run(self, *args, **kwargs):
    # Subclasses must provide the actual pipeline body.
    raise NotImplementedError()
33 31
34 32
class CrashAnalysisPipeline(CrashBasePipeline):
  """Runs the culprit analysis for one crash and records its progress."""

  def _SetErrorIfAborted(self, aborted):
    # When the pipeline was aborted, flag the stored analysis as failed so
    # a later request can retry it.
    if not aborted:
      return

    logging.error('Aborted analysis for %s', repr(self.crash_identifiers))
    analysis = findit_for_client.GetAnalysisForClient(
        self.crash_identifiers, self.client_id)
    analysis.status = analysis_status.ERROR
    analysis.put()

  def finalized(self):
    self._SetErrorIfAborted(self.was_aborted)

  # Arguments number differs from overridden method - pylint: disable=W0221
  def run(self, crash_identifiers, client_id):
    analysis = findit_for_client.GetAnalysisForClient(
        crash_identifiers, client_id)

    # Record that the (potentially long) culprit search is underway.
    analysis.findit_version = appengine_util.GetCurrentVersion()
    analysis.pipeline_status_path = self.pipeline_status_path()
    analysis.started_time = time_util.GetUTCNow()
    analysis.status = analysis_status.RUNNING
    analysis.put()

    # Run the analysis.
    result, tags = findit_for_client.FindCulprit(analysis)

    # Persist the result and any recognized tags, then mark completion.
    analysis.completed_time = time_util.GetUTCNow()
    analysis.result = result
    for name, value in tags.iteritems():
      # TODO(http://crbug.com/602702): make it possible to add arbitrary tags.
      if hasattr(analysis, name):
        setattr(analysis, name, value)
    analysis.status = analysis_status.COMPLETED
    analysis.put()
74 72
class PublishResultPipeline(CrashBasePipeline):
  """Publishes a finished analysis result to the client's Pub/Sub topic."""

  def finalized(self):
    if self.was_aborted:  # pragma: no cover.
      # Fix: the format arguments were swapped (crash identifiers were
      # substituted for the client id and vice versa); order now matches
      # the success message logged in run() below.
      logging.error('Failed to publish %s analysis result for %s',
                    self.client_id, repr(self.crash_identifiers))

  # Arguments number differs from overridden method - pylint: disable=W0221
  def run(self, crash_identifiers, client_id):
    """Serializes the analysis result and publishes it to Pub/Sub.

    Args:
      crash_identifiers: dict-like key identifying the analyzed crash.
      client_id: id of the client (e.g. fracas) whose config holds the topic.
    """
    analysis = findit_for_client.GetAnalysisForClient(crash_identifiers,
                                                      client_id)
    result = findit_for_client.GetPublishResultFromAnalysis(analysis,
                                                            crash_identifiers,
                                                            client_id)
    # sort_keys keeps the serialized message deterministic.
    messages_data = [json.dumps(result, sort_keys=True)]

    # TODO: looking up per-client config and the topic via raw strings is
    # error-prone; move this behind a CrashConfig accessor (per review).
    client_config = getattr(CrashConfig.Get(), client_id)
    topic = client_config['analysis_result_pubsub_topic']
    pubsub_util.PublishMessagesToTopic(messages_data, topic)
    logging.info('Published %s analysis result for %s', client_id,
                 repr(crash_identifiers))
106 94
107 95
class CrashWrapperPipeline(BasePipeline):
  """Chains the analysis pipeline with the result-publishing pipeline."""

  # Arguments number differs from overridden method - pylint: disable=W0221
  def run(self, crash_identifiers, client_id):
    # Publishing must only start after the analysis pipeline has finished.
    analysis_task = yield CrashAnalysisPipeline(crash_identifiers, client_id)
    with pipeline.After(analysis_task):
      yield PublishResultPipeline(crash_identifiers, client_id)
115 103
@ndb.transactional
def _NeedsNewAnalysis(
    crash_identifiers, chrome_version, signature, client_id,
    platform, stack_trace, customized_data):
  """Decides whether a fresh analysis is required, resetting it if so.

  Runs transactionally so that concurrent requests for the same crash do
  not both schedule an analysis. Returns the result of ResetAnalysis when
  a new run is needed, False otherwise.
  """
  analysis = findit_for_client.GetAnalysisForClient(crash_identifiers,
                                                    client_id)
  if analysis and not analysis.failed:
    # A new analysis is not needed if last one didn't complete or succeeded.
    # TODO(http://crbug.com/600535): re-analyze if stack trace or regression
    # range changed.
    logging.info('The analysis of %s has already been done.',
                 repr(crash_identifiers))
    return False

  return findit_for_client.ResetAnalysis(analysis, crash_identifiers,
                                         chrome_version, signature, client_id,
                                         platform, stack_trace,
                                         customized_data)
154 122
def ScheduleNewAnalysisForCrash(
    crash_identifiers, chrome_version, signature, client_id,
    platform, stack_trace, customized_data,
    queue_name=constants.DEFAULT_QUEUE):
  """Schedules an analysis."""
  # Check policy and tune arguments if needed; bail out early when the
  # client's policy rejects this crash.
  pass_policy, updated_analysis_args = findit_for_client.CheckPolicyForClient(
      crash_identifiers, chrome_version, signature,
      client_id, platform, stack_trace,
      customized_data)
  if not pass_policy:
    return False

  # Skip scheduling when a previous analysis already completed or is running.
  if not _NeedsNewAnalysis(*updated_analysis_args):
    return False

  analysis_pipeline = CrashWrapperPipeline(crash_identifiers, client_id)
  # Attribute defined outside __init__ - pylint: disable=W0201
  analysis_pipeline.target = appengine_util.GetTargetNameForModule(
      constants.CRASH_BACKEND[client_id])
  analysis_pipeline.start(queue_name=queue_name)
  logging.info('New %s analysis is scheduled for %s', client_id,
               repr(crash_identifiers))
  return True
OLDNEW

Powered by Google App Engine
This is Rietveld 408576698