Chromium Code Reviews
chromiumcodereview-hr@appspot.gserviceaccount.com (chromiumcodereview-hr) | Please choose your nickname with Settings | Help | Chromium Project | Gerrit Changes | Sign out
(4)

Side by Side Diff: appengine/findit/handlers/crash/crash_handler.py

Issue 2455053004: Moving ScheduleNewAnalysis to break the cycle (Closed)
Patch Set: rebase & adding some tests Created 4 years, 1 month ago
Use n/p to move between diff chunks; N/P to move between comments. Draft comments are only viewable by you.
Jump to:
View unified diff | Download patch
OLDNEW
1 # Copyright 2016 The Chromium Authors. All rights reserved. 1 # Copyright 2016 The Chromium Authors. All rights reserved.
2 # Use of this source code is governed by a BSD-style license that can be 2 # Use of this source code is governed by a BSD-style license that can be
3 # found in the LICENSE file. 3 # found in the LICENSE file.
4 4
5 import base64 5 import base64
6 import json 6 import json
7 import logging 7 import logging
8 8
9 from common import constants 9 from common import constants
10 from common import appengine_util
10 from common.base_handler import BaseHandler 11 from common.base_handler import BaseHandler
11 from common.base_handler import Permission 12 from common.base_handler import Permission
12 from crash.crash_pipeline import FinditForClientID 13 from crash import crash_pipeline
14 from crash.crash_pipeline import CrashWrapperPipeline
13 from crash.crash_report import CrashReport 15 from crash.crash_report import CrashReport
14 16
15 17
16 class CrashHandler(BaseHandler): 18 class CrashHandler(BaseHandler):
17 PERMISSION_LEVEL = Permission.ANYONE 19 PERMISSION_LEVEL = Permission.ANYONE
18 20
19 def HandlePost(self): 21 def HandlePost(self):
20 """Handles push delivery from Pub/Sub for crash data. 22 """Handles push delivery from Pub/Sub for crash data.
21 23
22 The crash data should be in the following json format: 24 The crash data should be in the following json format:
(...skipping 70 matching lines...) Expand 10 before | Expand all | Expand 10 after
93 } 95 }
94 """ 96 """
95 try: 97 try:
96 received_message = json.loads(self.request.body) 98 received_message = json.loads(self.request.body)
97 pubsub_message = received_message['message'] 99 pubsub_message = received_message['message']
98 crash_data = json.loads(base64.b64decode(pubsub_message['data'])) 100 crash_data = json.loads(base64.b64decode(pubsub_message['data']))
99 101
100 logging.info('Processing message %s from subscription %s.', 102 logging.info('Processing message %s from subscription %s.',
101 pubsub_message['message_id'], 103 pubsub_message['message_id'],
102 received_message['subscription']) 104 received_message['subscription'])
105 logging.info('Crash data is %s', json.dumps(crash_data))
106 ScheduleNewAnalysis(crash_data)
103 107
104 logging.info('Crash data is %s', json.dumps(crash_data))
105
106 client_id = crash_data['client_id']
107 FinditForClientID(client_id).ScheduleNewAnalysis(crash_data,
108 queue_name=constants.CRASH_ANALYSIS_QUEUE[client_id])
109 except (KeyError, ValueError): # pragma: no cover. 108 except (KeyError, ValueError): # pragma: no cover.
110 # TODO: save exception in datastore and create a page to show them. 109 # TODO: save exception in datastore and create a page to show them.
111 logging.exception('Failed to process crash message') 110 logging.exception('Failed to process crash message')
112 logging.info(self.request.body) 111 logging.info(self.request.body)
112
113
# TODO(http://crbug.com/659346): we don't cover anything after the
# call to _NeedsNewAnalysis.
def ScheduleNewAnalysis(crash_data):
  """Creates a pipeline object to perform the analysis, and starts it.

  If we can detect that the analysis doesn't need to be performed
  (e.g., it was already performed, or the ``crash_data`` is empty so
  there's nothing we can do), then we will skip creating the pipeline
  at all.

  Args:
    crash_data (dict): Deserialized JSON crash data from Pub/Sub. Must
      contain at least the ``client_id`` and ``crash_identifiers`` keys;
      the client's ``CheckPolicy`` may rewrite the rest as needed.

  Returns:
    True if we started a new pipeline; False otherwise.
  """
  client_id = crash_data['client_id']
  # N.B., must call FinditForClientID indirectly, for mock testing.
  findit_client = crash_pipeline.FinditForClientID(client_id)

  # Check policy and modify the crash_data as needed. A None result means
  # policy rejected this crash, so there is nothing to analyze.
  crash_data = findit_client.CheckPolicy(crash_data)
  if crash_data is None:
    return False

  # Detect the regression range, and decide if we actually need to
  # run a new analysis or not.
  # Access to a protected member - pylint: disable=W0212
  if not findit_client._NeedsNewAnalysis(crash_data):
    return False

  crash_identifiers = crash_data['crash_identifiers']
  # N.B., we cannot pass ``self`` directly to the _pipeline_cls, because
  # it is not JSON-serializable (and there's no way to make it such,
  # since JSON-serializability is defined by JSON-encoders rather than
  # as methods on the objects being encoded).
  pipeline = CrashWrapperPipeline(client_id, crash_identifiers)
  # Attribute defined outside __init__ - pylint: disable=W0201
  pipeline.target = appengine_util.GetTargetNameForModule(
      constants.CRASH_BACKEND[client_id])
  queue_name = constants.CRASH_ANALYSIS_QUEUE[client_id]
  pipeline.start(queue_name=queue_name)
  logging.info('New %s analysis is scheduled for %s', client_id,
               repr(crash_identifiers))
  return True
OLDNEW

Powered by Google App Engine
This is Rietveld 408576698