OLD | NEW |
---|---|
1 # Copyright 2015 The Chromium Authors. All rights reserved. | 1 # Copyright 2016 The Chromium Authors. All rights reserved. |
2 # Use of this source code is governed by a BSD-style license that can be | 2 # Use of this source code is governed by a BSD-style license that can be |
3 # found in the LICENSE file. | 3 # found in the LICENSE file. |
4 | 4 |
5 """URL endpoint for a cron job to update bugs after bisects.""" | 5 """URL endpoint for a cron job to update bugs after bisects.""" |
6 | 6 |
7 import datetime | 7 import datetime |
8 import json | 8 import json |
9 import logging | 9 import logging |
10 import re | 10 import re |
11 import traceback | 11 import traceback |
12 import urllib | 12 |
13 | |
14 from google.appengine.api import app_identity | |
15 from google.appengine.api import mail | 13 from google.appengine.api import mail |
16 from google.appengine.api import urlfetch | |
17 from google.appengine.api import urlfetch_errors | |
18 from google.appengine.ext import ndb | 14 from google.appengine.ext import ndb |
19 | 15 |
20 from dashboard import bisect_fyi | 16 from dashboard import bisect_fyi |
21 from dashboard import buildbucket_service | 17 from dashboard import bisect_report |
22 from dashboard import datastore_hooks | 18 from dashboard import datastore_hooks |
23 from dashboard import email_template | 19 from dashboard import email_template |
24 from dashboard import issue_tracker_service | 20 from dashboard import issue_tracker_service |
25 from dashboard import layered_cache | 21 from dashboard import layered_cache |
26 from dashboard import quick_logger | 22 from dashboard import quick_logger |
27 from dashboard import request_handler | 23 from dashboard import request_handler |
28 from dashboard import rietveld_service | 24 from dashboard import rietveld_service |
29 from dashboard import start_try_job | |
30 from dashboard import utils | 25 from dashboard import utils |
31 from dashboard.models import anomaly | 26 from dashboard.models import anomaly |
32 from dashboard.models import bug_data | 27 from dashboard.models import bug_data |
33 from dashboard.models import try_job | 28 from dashboard.models import try_job |
34 | 29 |
35 # Try job status codes from rietveld (see TryJobResult in codereview/models.py) | 30 COMPLETED, FAILED, PENDING = 'completed', 'failed', 'pending' |
36 SUCCESS, WARNINGS, FAILURE, SKIPPED, EXCEPTION, RETRY, TRYPENDING = range(7) | |
37 # Not a status code from rietveld, added for completeness of the possible | |
38 # statuses a job can be in. | |
39 STARTED = -1 | |
40 OK = (SUCCESS, WARNINGS, SKIPPED) | |
41 FAIL = (FAILURE, EXCEPTION) | |
42 | 31 |
43 _COMMIT_HASH_CACHE_KEY = 'commit_hash_%s' | 32 _COMMIT_HASH_CACHE_KEY = 'commit_hash_%s' |
44 | 33 |
45 _CONFIDENCE_THRESHOLD = 99.5 | |
46 | |
47 # Timeout in minutes set by buildbot for trybots. | |
48 _BISECT_BOT_TIMEOUT = 12 * 60 | |
49 | |
50 # Amount of time to pass before deleting a try job. | 34 # Amount of time to pass before deleting a try job. |
51 _STALE_TRYJOB_DELTA = datetime.timedelta(days=7) | 35 _STALE_TRYJOB_DELTA = datetime.timedelta(days=7) |
52 | 36 |
53 # Amount of time to pass before deleting try jobs that use Buildbucket. | |
54 _STALE_TRYJOB_DELTA_BUILDBUCKET = datetime.timedelta(days=21) | |
55 | |
56 _BUG_COMMENT_TEMPLATE = """Bisect job status: %(status)s | |
57 Bisect job ran on: %(bisect_bot)s | |
58 | |
59 %(results)s | |
60 | |
61 Buildbot stdio: %(buildbot_log_url)s | |
62 Job details: %(issue_url)s | |
63 """ | |
64 | |
65 _AUTO_ASSIGN_MSG = """ | 37 _AUTO_ASSIGN_MSG = """ |
66 ==== Auto-CCing suspected CL author %(author)s ==== | 38 === Auto-CCing suspected CL author %(author)s === |
67 | 39 |
68 Hi %(author)s, the bisect results pointed to your CL below as possibly | 40 Hi %(author)s, the bisect results pointed to your CL below as possibly |
69 causing a regression. Please have a look at this info and see whether | 41 causing a regression. Please have a look at this info and see whether |
70 your CL is related. | 42 your CL is related. |
71 | 43 |
72 """ | 44 """ |
73 | 45 |
74 | 46 |
75 class UnexpectedJsonError(Exception): | |
76 pass | |
77 | |
78 | |
79 class BugUpdateFailure(Exception): | 47 class BugUpdateFailure(Exception): |
80 pass | 48 pass |
81 | 49 |
82 | 50 |
83 class UpdateBugWithResultsHandler(request_handler.RequestHandler): | 51 class UpdateBugWithResultsHandler(request_handler.RequestHandler): |
84 """URL endpoint for a cron job to update bugs after bisects.""" | 52 """URL endpoint for a cron job to update bugs after bisects.""" |
85 | 53 |
86 def get(self): | 54 def get(self): |
87 """The get handler method is called from a cron job. | 55 """The get handler method is called from a cron job. |
88 | 56 |
89 It expects no parameters and has no output. It checks all current bisect try | 57 It expects no parameters and has no output. It checks all current bisect try |
90 jobs and sends comments to an issue on the issue tracker if a bisect job has | 58 jobs and sends comments to an issue on the issue tracker if a bisect job has |
91 completed. | 59 completed. |
92 """ | 60 """ |
93 credentials = rietveld_service.Credentials( | 61 credentials = rietveld_service.Credentials( |
94 rietveld_service.GetDefaultRietveldConfig(), | 62 rietveld_service.GetDefaultRietveldConfig(), |
95 rietveld_service.PROJECTHOSTING_SCOPE) | 63 rietveld_service.PROJECTHOSTING_SCOPE) |
96 issue_tracker = issue_tracker_service.IssueTrackerService( | 64 issue_tracker = issue_tracker_service.IssueTrackerService( |
97 additional_credentials=credentials) | 65 additional_credentials=credentials) |
98 | 66 |
99 # Set privilege so we can also fetch internal try_job entities. | 67 # Set privilege so we can also fetch internal try_job entities. |
100 datastore_hooks.SetPrivilegedRequest() | 68 datastore_hooks.SetPrivilegedRequest() |
101 | 69 |
102 jobs_to_check = try_job.TryJob.query( | 70 jobs_to_check = try_job.TryJob.query( |
103 try_job.TryJob.status == 'started').fetch() | 71 try_job.TryJob.status == 'started').fetch() |
104 all_successful = True | 72 all_successful = True |
73 | |
105 for job in jobs_to_check: | 74 for job in jobs_to_check: |
106 try: | 75 try: |
107 if job.use_buildbucket: | |
108 logging.info('Checking job %s with Buildbucket job ID %s.', | |
109 job.key.id(), getattr(job, 'buildbucket_job_id', None)) | |
110 else: | |
111 logging.info('Checking job %s with Rietveld issue ID %s.', | |
112 job.key.id(), getattr(job, 'rietveld_issue_id', None)) | |
113 _CheckJob(job, issue_tracker) | 76 _CheckJob(job, issue_tracker) |
114 except Exception as e: # pylint: disable=broad-except | 77 except Exception as e: # pylint: disable=broad-except |
115 logging.error('Caught Exception %s: %s\n%s', | 78 logging.error('Caught Exception %s: %s\n%s', |
116 type(e).__name__, e, traceback.format_exc()) | 79 type(e).__name__, e, traceback.format_exc()) |
117 all_successful = False | 80 all_successful = False |
81 | |
118 if all_successful: | 82 if all_successful: |
119 utils.TickMonitoringCustomMetric('UpdateBugWithResults') | 83 utils.TickMonitoringCustomMetric('UpdateBugWithResults') |
120 | 84 |
121 | 85 |
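Note: for context on how this cron endpoint is reached, a minimal wiring sketch follows. The route path, module name, and webapp2 usage are assumptions for illustration; the dashboard's real routing and cron schedule are defined outside this file.

```python
# Hypothetical wiring sketch, not part of this CL. Assumes the dashboard's
# request_handler is webapp2-based, as is typical for App Engine Python apps.
import webapp2

from dashboard import update_bug_with_results  # assumed module name

APP = webapp2.WSGIApplication([
    # Illustrative path; the canonical one lives in the app's routing config
    # and is referenced from cron.yaml with a periodic schedule.
    ('/update_bug_with_results',
     update_bug_with_results.UpdateBugWithResultsHandler),
])
```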
122 def _CheckJob(job, issue_tracker): | 86 def _CheckJob(job, issue_tracker): |
123 """Checks whether a try job is finished and updates a bug if applicable. | 87 """Checks whether a try job is finished and updates a bug if applicable. |
124 | 88 |
125 This method returns nothing, but it may log errors. | 89 This method returns nothing, but it may log errors. |
126 | 90 |
127 Args: | 91 Args: |
128 job: A TryJob entity, which represents one bisect try job. | 92 job: A TryJob entity, which represents one bisect try job. |
129 issue_tracker: An issue_tracker_service.IssueTrackerService instance. | 93 issue_tracker: An issue_tracker_service.IssueTrackerService instance. |
130 """ | 94 """ |
131 # Give up on stale try job. | 95 if _IsStale(job): |
132 if job.use_buildbucket: | 96 job.SetStaled() |
133 stale_delta = _STALE_TRYJOB_DELTA_BUILDBUCKET | 97 UpdateQuickLog(job) |
134 else: | 98 # TODO(chrisphan): Do we want to send a FYI Bisect email here? |
135 stale_delta = _STALE_TRYJOB_DELTA | 99 return |
136 if (job.last_ran_timestamp and | 100 |
137 job.last_ran_timestamp < datetime.datetime.now() - stale_delta): | 101 results_data = job.results_data |
138 comment = 'Stale bisect job, will stop waiting for results.' | 102 if not results_data or results_data['status'] not in [COMPLETED, FAILED]: |
139 comment += 'Rietveld issue: %s' % job.rietveld_issue_id | |
140 start_try_job.LogBisectResult(job.bug_id, comment) | |
141 job.SetFailed() | |
142 return | 103 return |
143 | 104 |
144 if job.job_type == 'perf-try': | 105 if job.job_type == 'perf-try': |
145 _CheckPerfTryJob(job) | 106 _SendPerfTryJobEmail(job) |
146 elif job.job_type == 'bisect-fyi': | 107 elif job.job_type == 'bisect-fyi': |
147 _CheckFYIBisectJob(job, issue_tracker) | 108 _CheckFYIBisectJob(job, issue_tracker) |
148 else: | 109 else: |
149 # Delete bisect jobs that aren't associated with any bug id. | |
150 if job.bug_id is None or job.bug_id < 0: | |
151 job.key.delete() | |
152 return | |
153 _CheckBisectJob(job, issue_tracker) | 110 _CheckBisectJob(job, issue_tracker) |
154 | 111 |
155 | 112 if results_data['status'] == COMPLETED: |
156 def _CheckPerfTryJob(job): | 113 job.SetCompleted() |
157 perf_results = _GetPerfTryResults(job) | 114 else: |
158 if not perf_results: | 115 job.SetFailed() |
159 return | 116 |
160 _SendPerfTryJobEmail(job, perf_results) | 117 |
161 job.SetCompleted() | 118 def _CheckBisectJob(job, issue_tracker): |
162 | 119 results_data = job.results_data |
163 | 120 has_partial_result = ('revision_data' in results_data and |
164 def _SendPerfTryJobEmail(job, perf_results): | 121 results_data['revision_data']) |
122 if results_data['status'] == FAILED and not has_partial_result: | |
123 return | |
124 _PostResult(job, issue_tracker) | |
125 | |
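Note: a rough sketch of the results_data payload these new code paths read. Only keys actually used in this file are shown; the values and the inner shape of revision_data are invented for illustration (the full schema comes from the bisect recipe).

```python
# Illustrative only -- not part of this CL.
_EXAMPLE_RESULTS_DATA = {
    'status': 'completed',       # compared against COMPLETED/FAILED above
    'revision_data': [           # only its truthiness is checked here
        {'revision': 'a1b2c3d4'},
    ],
    'culprit_data': {            # present when a culprit CL was identified
        'cl': 'a1b2c3d4',                 # feeds _GetCommitHashCacheKey
        'email': 'author@chromium.org',   # feeds _GetAuthorsToCC
        'revisions_links': [
            'https://chromium.googlesource.com/chromium/src/+/a1b2c3d4',
        ],
    },
}
```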
126 | |
127 def _CheckFYIBisectJob(job, issue_tracker): | |
128 try: | |
129 _PostResult(job, issue_tracker) | |
130 error_message = bisect_fyi.VerifyBisectFYIResults(job) | |
131 if not bisect_fyi.IsBugUpdated(job, issue_tracker): | |
132 error_message += '\nFailed to update bug with bisect results.' | |
133 except BugUpdateFailure as e: | |
134 error_message = 'Failed to update bug with bisect results: %s' % e | |
135 if job.results_data['status'] == FAILED or error_message: | |
136 _SendFYIBisectEmail(job, error_message) | |
137 | |
138 | |
139 def _SendPerfTryJobEmail(job): | |
165 """Sends an email to the user who started the perf try job.""" | 140 """Sends an email to the user who started the perf try job.""" |
166 to = [job.email] if job.email else [] | 141 if not job.email: |
167 if not to: | 142 return |
168 logging.error('No "email" in job data. %s.', job.rietveld_issue_id) | 143 email_report = email_template.GetPerfTryJobEmailReport(job) |
169 return | 144 if not email_report: |
170 | 145 return |
171 perf_email = email_template.GetPerfTryJobEmail(perf_results) | |
172 if not perf_email: | |
173 logging.error('Failed to create "perf_email" from result data. %s.' | |
174 ' Results data: %s', job.rietveld_issue_id, perf_results) | |
175 return | |
176 | |
177 mail.send_mail(sender='gasper-alerts@google.com', | 146 mail.send_mail(sender='gasper-alerts@google.com', |
178 to=','.join(to), | 147 to=job.email, |
179 subject=perf_email['subject'], | 148 subject=email_report['subject'], |
180 body=perf_email['body'], | 149 body=email_report['body'], |
181 html=perf_email['html']) | 150 html=email_report['html']) |
182 | 151 |
183 | 152 |
184 def _ParseCloudLinksFromOutput(output): | 153 def _PostResult(job, issue_tracker): |
185 """Extracts cloud storage URLs from text.""" | 154 """Posts bisect results on issue tracker.""" |
186 html_results_pattern = re.compile( | |
187 r'@@@STEP_LINK@HTML Results@(?P<link>http://storage.googleapis.com/' | |
188 'chromium-telemetry/html-results/results-[a-z0-9-_]+)@@@', | |
189 re.MULTILINE) | |
190 profiler_pattern = re.compile( | |
191 r'@@@STEP_LINK@(?P<title>[^@]+)@(?P<link>https://console.developers.' | |
192 'google.com/m/cloudstorage/b/[a-z-]+/o/profiler-[a-z0-9-_.]+)@@@', | |
193 re.MULTILINE) | |
194 | |
195 links = { | |
196 'html-results': html_results_pattern.findall(output), | |
197 'profiler': profiler_pattern.findall(output), | |
198 } | |
199 | |
200 return links | |
201 | |
202 | |
203 def _LoadConfigFromString(contents): | |
204 try: | |
205 # The config should be in the following format: | |
206 # config = {'foo': 'foo'} | |
207 # So we really just need to strip off the "config" part. | |
208 json_contents = str(contents).split('{')[1].split('}')[0] | |
209 json_contents = json_contents.replace("'", '\"') | |
210 json_contents = '{%s}' % json_contents | |
211 return json.loads(json_contents) | |
212 except (IndexError, ValueError, AttributeError): | |
213 logging.error('Could not parse config contents: %s', contents) | |
214 return None | |
215 | |
216 | |
217 def _GetPerfTryResults(job): | |
218 """Gets perf results for a perf try job. | |
219 | |
220 Args: | |
221 job: TryJob entity. | |
222 | |
223 Returns: | |
224 A dictionary containing status, results, buildbot_log_url, and | |
225 issue_url for this bisect job, None if perf try job is pending or | |
226 there's an error fetching run data. | |
227 """ | |
228 results = {} | |
229 # Fetch bisect bot results from Rietveld server. | |
230 response = _FetchRietveldIssueJSON(job) | |
231 issue_url = _RietveldIssueURL(job) | |
232 try_job_info = _ValidateRietveldResponse(response) | |
233 | |
234 results['buildbot_log_url'] = str(try_job_info['url']) | |
235 results['issue_url'] = str(issue_url) | |
236 | |
237 # Check whether the bisect job is finished or not and fetch the output. | |
238 result = int(try_job_info['result']) | |
239 if result not in OK + FAIL: | |
240 return None | |
241 | |
242 results_url = ('%s/steps/Running%%20Bisection/logs/stdio/text' % | |
243 try_job_info['url']) | |
244 response = _FetchURL(results_url, skip_status_code=True) | |
245 results['bisect_bot'] = try_job_info['builder'] | |
246 results['config'] = _LoadConfigFromString(job.config) | |
247 | |
248 if not results['config']: | |
249 results['status'] = 'Failure' | |
250 return results | |
251 | |
252 # We don't see content for "Result" step. Bot probably did not get there. | |
253 if not response or response.status_code != 200: | |
254 results['status'] = 'Failure' | |
255 return results | |
256 | |
257 links = _ParseCloudLinksFromOutput(response.content) | |
258 | |
259 results['html_results'] = (links['html-results'][0] | |
260 if links['html-results'] else '') | |
261 results['profiler_results'] = links['profiler'] | |
262 results['status'] = 'Completed' | |
263 | |
264 return results | |
265 | |
266 | |
267 def _CheckBisectJob(job, issue_tracker): | |
268 bisect_results = _GetBisectResults(job) | |
269 if not bisect_results: | |
270 logging.info('No bisect results, job may be pending.') | |
271 return | |
272 logging.info('Bisect job status: %s.', bisect_results['status']) | |
273 if bisect_results['status'] == 'Completed': | |
274 _PostSucessfulResult(job, bisect_results, issue_tracker) | |
275 job.SetCompleted() | |
276 elif bisect_results['status'] == 'Failure with partial results': | |
277 _PostFailedResult( | |
278 job, bisect_results, issue_tracker, add_bug_comment=True) | |
279 job.SetFailed() | |
280 elif bisect_results['status'] == 'Failure': | |
281 _PostFailedResult(job, bisect_results, issue_tracker) | |
282 job.SetFailed() | |
283 | |
284 | |
285 def _GetBisectResults(job): | |
286 """Gets bisect results for a bisect job. | |
287 | |
288 Args: | |
289 job: TryJob entity. | |
290 | |
291 Returns: | |
292 A dictionary containing status, results, buildbot_log_url, and | |
293 issue_url for this bisect job. The issue_url may be a link to a Rietveld | |
294 issue or to Buildbucket job info. | |
295 """ | |
296 results = {} | |
297 # Fetch bisect bot results from Rietveld server. | |
298 if job.use_buildbucket: | |
299 try_job_info = _ValidateAndConvertBuildbucketResponse( | |
300 buildbucket_service.GetJobStatus(job.buildbucket_job_id), job) | |
301 hostname = app_identity.get_default_version_hostname() | |
302 job_id = job.buildbucket_job_id | |
303 issue_url = 'https://%s/buildbucket_job_status/%s' % (hostname, job_id) | |
304 else: | |
305 response = _FetchRietveldIssueJSON(job) | |
306 issue_url = _RietveldIssueURL(job) | |
307 try_job_info = _ValidateRietveldResponse(response) | |
308 | |
309 results['buildbot_log_url'] = str(try_job_info['url']) | |
310 results['issue_url'] = str(issue_url) | |
311 | |
312 # Check whether the bisect job is finished or not and fetch the output. | |
313 result = int(try_job_info['result']) | |
314 if result not in OK + FAIL: | |
315 return None | |
316 | |
317 results_url = '%s/steps/Results/logs/stdio/text' % try_job_info['url'] | |
318 response = _FetchURL(results_url, skip_status_code=True) | |
319 results['bisect_bot'] = try_job_info['builder'] | |
320 # We don't see content for "Result" step. Bot probably did not get there. | |
321 if not response or response.status_code != 200: | |
322 results['status'] = 'Failure' | |
323 results['results'] = '' | |
324 build_data = _FetchBuildData(try_job_info['url']) | |
325 if build_data: | |
326 _CheckBisectBotForInfraFailure(job.bug_id, build_data, | |
327 try_job_info['url']) | |
328 results['results'] = _GetBotFailureInfo(build_data) | |
329 partial_result = _GetPartialBisectResult(build_data, try_job_info['url']) | |
330 if partial_result: | |
331 results['status'] = 'Failure with partial results' | |
332 results['results'] += partial_result | |
333 return results | |
334 | |
335 # Clean result. | |
336 # If the bisect_results string contains any non-ASCII characters, | |
337 # converting to string should prevent an error from being raised. | |
338 bisect_result = _BeautifyContent(str(response.content)) | |
339 | |
340 # Bisect is considered a success if a result is provided. | |
341 # "BISECTION ABORTED" is added when a job is aborted early because the | |
342 # associated issue was closed. | |
343 # TODO(robertocn): Make sure we are outputting this string | |
344 if ('BISECT JOB RESULTS' in bisect_result or | |
345 'BISECTION ABORTED' in bisect_result): | |
346 results['status'] = 'Completed' | |
347 else: | |
348 results['status'] = 'Failure' | |
349 | |
350 results['results'] = bisect_result | |
351 return results | |
352 | |
353 | |
354 def _FetchBuildData(build_url): | |
355 """Fetches build data from buildbot json api. | |
356 | |
357 For json api examples see: | |
358 http://build.chromium.org/p/tryserver.chromium.perf/json/help | |
359 | |
360 Args: | |
361 build_url: URL to a Buildbot bisect tryjob. | |
362 | |
363 Returns: | |
364 A dictionary of build data for a bisect tryjob. None if there's an | |
365 error fetching build data. | |
366 """ | |
367 index = build_url.find('/builders/') | |
368 if index == -1: | |
369 logging.error('Build url does not contain expected "/builders/" to ' | |
370 'fetch json data. URL: %s.', build_url) | |
371 return None | |
372 | |
373 # Fetch and verify json data. | |
374 json_build_url = build_url[:index] + '/json' + build_url[index:] | |
375 response = _FetchURL(json_build_url) | |
376 if not response: | |
377 logging.error('Could not fetch json data from %s.', json_build_url) | |
378 return None | |
379 try: | |
380 build_data = json.loads(response.content) | |
381 if (not build_data or | |
382 not build_data.get('steps') or | |
383 not build_data.get('times') or | |
384 not build_data.get('text')): | |
385 raise ValueError('Expected properties not found in build data: %s.' % | |
386 build_data) | |
387 except ValueError, e: | |
388 logging.error('Response from builder could not be parsed as JSON. ' | |
389 'URL: %s. Error: %s.', json_build_url, e) | |
390 return None | |
391 return build_data | |
392 | |
393 | |
394 def _GetBotFailureInfo(build_data): | |
395 """Returns helpful message about failed bisect runs.""" | |
396 message = '' | |
397 | |
398 # Add success rate message. | |
399 build_steps = build_data['steps'] | |
400 num_success_build = 0 | |
401 total_build = 0 | |
402 for step in build_steps: | |
403 # 'Working on' is the step name for the bisect run of a single build. | |
404 if 'Working on' in step['name']: | |
405 if step['results'][0] in (SUCCESS, WARNINGS): | |
406 num_success_build += 1 | |
407 total_build += 1 | |
408 message += 'Completed %s/%s builds.\n' % (num_success_build, total_build) | |
409 | |
410 # Add run time message. | |
411 run_time = build_data['times'][1] - build_data['times'][0] | |
412 run_time = int(run_time / 60) # Minutes. | |
413 message += 'Run time: %s/%s minutes.\n' % (run_time, _BISECT_BOT_TIMEOUT) | |
414 if run_time >= _BISECT_BOT_TIMEOUT: | |
415 message += 'Bisect timed out! Try again with a smaller revision range.\n' | |
416 | |
417 # Add failed steps message. | |
418 # 'text' field has the following properties: | |
419 # text":["failed","slave_steps","failed","Working on [b92af3931458f2]"] | |
420 status_list = build_data['text'] | |
421 if status_list[0] == 'failed': | |
422 message += 'Failed steps: %s\n\n' % ', '.join(status_list[1::2]) | |
423 | |
424 return message | |
425 | |
426 | |
427 def _GetPartialBisectResult(build_data, build_url): | |
428 """Gets partial bisect result if there's any. | |
429 | |
430 For bisect result output format see: | |
431 https://chromium.googlesource.com/chromium/src/+/master/tools/ | |
432 auto_bisect/bisect_perf_regression.py | |
433 | |
434 Args: | |
435 build_data: A dictionary of build data for a bisect tryjob. | |
436 build_url: URL to a Buildbot bisect tryjob. | |
437 | |
438 Returns: | |
439 String result of bisect job. | |
440 """ | |
441 build_steps = build_data['steps'] | |
442 # Search for the last successful bisect step. | |
443 pattern = re.compile(r'===== PARTIAL RESULTS =====(.*)\n\n', re.DOTALL) | |
444 for step in reversed(build_steps): | |
445 # 'Working on' is the step name for the bisect run of a single build. | |
446 if ('Working on' in step['name'] and | |
447 step['results'][0] in (SUCCESS, WARNINGS)): | |
448 stdio_url = ('%s/steps/%s/logs/stdio/text' % | |
449 (build_url, urllib.quote(step['name']))) | |
450 response = _FetchURL(stdio_url) | |
451 if response: | |
452 match = pattern.search(response.content) | |
453 if match: | |
454 return _BeautifyContent(match.group()) | |
455 return None | |
456 | |
457 | |
458 def _PostFailedResult( | |
459 job, bisect_results, issue_tracker, add_bug_comment=False): | |
460 """Posts failed bisect results on logger and optional issue tracker.""" | |
461 comment = _BUG_COMMENT_TEMPLATE % bisect_results | |
462 if add_bug_comment: | |
463 # Set restrict view label if the bisect results are internal only. | |
464 labels = ['Restrict-View-Google'] if job.internal_only else None | |
465 added_comment = issue_tracker.AddBugComment( | |
466 job.bug_id, comment, labels=labels) | |
467 if not added_comment: | |
468 raise BugUpdateFailure('Failed to update bug %s with comment %s' | |
469 % (job.bug_id, comment)) | |
470 start_try_job.LogBisectResult(job.bug_id, comment) | |
471 logging.info('Updated bug %s with results from %s', | |
472 job.bug_id, job.rietveld_issue_id) | |
473 | |
474 | |
475 def _PostSucessfulResult(job, bisect_results, issue_tracker): | |
476 """Posts successful bisect results on logger and issue tracker.""" | |
477 # From the results, get the list of people to CC (if applicable), the bug | 155 # From the results, get the list of people to CC (if applicable), the bug |
478 # to merge into (if applicable) and the commit hash cache key, which | 156 # to merge into (if applicable) and the commit hash cache key, which |
479 # will be used below. | 157 # will be used below. |
158 results_data = job.results_data | |
480 authors_to_cc = [] | 159 authors_to_cc = [] |
481 merge_issue = None | 160 merge_issue = None |
482 bug = ndb.Key('Bug', job.bug_id).get() | 161 bug = ndb.Key('Bug', job.bug_id).get() |
483 | 162 |
484 commit_cache_key = _GetCommitHashCacheKey(bisect_results['results']) | 163 commit_cache_key = _GetCommitHashCacheKey(results_data) |
485 if bug: | 164 if bug: |
486 merge_issue = layered_cache.GetExternal(commit_cache_key) | 165 merge_issue = layered_cache.GetExternal(commit_cache_key) |
487 if not merge_issue: | 166 if not merge_issue: |
488 authors_to_cc = _GetAuthorsToCC(bisect_results['results']) | 167 authors_to_cc = _GetAuthorsToCC(results_data) |
489 | 168 |
490 comment = _BUG_COMMENT_TEMPLATE % bisect_results | 169 comment = bisect_report.GetReport(job) |
491 | 170 |
492 # Add a friendly message to author of culprit CL. | 171 # Add a friendly message to author of culprit CL. |
493 owner = None | 172 owner = None |
494 if authors_to_cc: | 173 if authors_to_cc: |
495 comment = '%s%s' % (_AUTO_ASSIGN_MSG % {'author': authors_to_cc[0]}, | 174 comment = '%s%s' % (_AUTO_ASSIGN_MSG % {'author': authors_to_cc[0]}, |
496 comment) | 175 comment) |
497 owner = authors_to_cc[0] | 176 owner = authors_to_cc[0] |
498 # Set restrict view label if the bisect results are internal only. | 177 # Set restrict view label if the bisect results are internal only. |
499 labels = ['Restrict-View-Google'] if job.internal_only else None | 178 labels = ['Restrict-View-Google'] if job.internal_only else None |
500 added_comment = issue_tracker.AddBugComment( | 179 comment_added = issue_tracker.AddBugComment( |
501 job.bug_id, comment, cc_list=authors_to_cc, merge_issue=merge_issue, | 180 job.bug_id, comment, cc_list=authors_to_cc, merge_issue=merge_issue, |
502 labels=labels, owner=owner) | 181 labels=labels, owner=owner) |
503 if not added_comment: | 182 if not comment_added: |
504 raise BugUpdateFailure('Failed to update bug %s with comment %s' | 183 raise BugUpdateFailure('Failed to update bug %s with comment %s' |
505 % (job.bug_id, comment)) | 184 % (job.bug_id, comment)) |
506 | 185 |
507 start_try_job.LogBisectResult(job.bug_id, comment) | |
508 logging.info('Updated bug %s with results from %s', | 186 logging.info('Updated bug %s with results from %s', |
509 job.bug_id, job.rietveld_issue_id) | 187 job.bug_id, job.rietveld_issue_id) |
510 | 188 |
511 if merge_issue: | 189 if merge_issue: |
512 _MapAnomaliesToMergeIntoBug(merge_issue, job.bug_id) | 190 _MapAnomaliesToMergeIntoBug(merge_issue, job.bug_id) |
513 # Mark the duplicate bug's Bug entity status as closed so that | 191 # Mark the duplicate bug's Bug entity status as closed so that |
514 # it doesn't get auto triaged. | 192 # it doesn't get auto triaged. |
515 bug.status = bug_data.BUG_STATUS_CLOSED | 193 bug.status = bug_data.BUG_STATUS_CLOSED |
516 bug.put() | 194 bug.put() |
517 | 195 |
518 # Cache the commit info and bug ID to datastore when there is no duplicate | 196 # Cache the commit info and bug ID to datastore when there is no duplicate |
519 # issue that this issue is getting merged into. This has to be done only | 197 # issue that this issue is getting merged into. This has to be done only |
520 # after the issue is updated successfully with bisect information. | 198 # after the issue is updated successfully with bisect information. |
521 if commit_cache_key and not merge_issue: | 199 if commit_cache_key and not merge_issue: |
522 layered_cache.SetExternal(commit_cache_key, str(job.bug_id), | 200 layered_cache.SetExternal(commit_cache_key, str(job.bug_id), |
523 days_to_keep=30) | 201 days_to_keep=30) |
524 logging.info('Cached bug id %s and commit info %s in the datastore.', | 202 logging.info('Cached bug id %s and commit info %s in the datastore.', |
525 job.bug_id, commit_cache_key) | 203 job.bug_id, commit_cache_key) |
526 | 204 |
527 | 205 |
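Note: a minimal, self-contained sketch of the merge-issue bookkeeping _PostResult does above, with a plain dict standing in for layered_cache (the real code uses GetExternal/SetExternal with a 30-day TTL). The commit hash and bug ids are made up.

```python
_commit_to_bug = {}  # stand-in for layered_cache


def _RecordCulprit(commit_hash, bug_id):
  """Returns the earlier bug to merge into, or None for the first report."""
  cache_key = 'commit_hash_%s' % commit_hash
  merge_issue = _commit_to_bug.get(cache_key)
  if merge_issue:
    # A previous bisect already blamed this commit; later bugs get merged
    # into that one and the cache entry is left untouched.
    return merge_issue
  _commit_to_bug[cache_key] = str(bug_id)
  return None


assert _RecordCulprit('a1b2c3d', 111) is None   # first bisect: cached
assert _RecordCulprit('a1b2c3d', 222) == '111'  # duplicate: merge into 111
```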
528 def _ValidateAndConvertBuildbucketResponse(job_info, job=None): | 206 def _IsStale(job): |
529 """Checks the response from the buildbucket service and converts it. | 207 if not job.last_ran_timestamp: |
530 | 208 return False |
531 The response is converted to a similar format to that used by Rietveld for | 209 time_since_last_ran = datetime.datetime.now() - job.last_ran_timestamp |
532 backwards compatibility. | 210 return time_since_last_ran > _STALE_TRYJOB_DELTA |
533 | |
534 Args: | |
535 job_info: A dictionary containing the response from the buildbucket service. | |
536 job: Bisect TryJob entity object. | |
537 | |
538 Returns: | |
539 Try job info dict in the same format as _ValidateRietveldResponse; will | |
540 have the keys "url", "results", and "bisect_bot". | |
541 | |
542 Raises: | |
543 UnexpectedJsonError: The format was not as expected. | |
544 """ | |
545 job_info = job_info['build'] | |
546 json_response = json.dumps(job_info) | |
547 if not job_info: | |
548 raise UnexpectedJsonError('No response from Buildbucket.') | |
549 if job_info.get('result') is None: | |
550 raise UnexpectedJsonError('No "result" in try job results. ' | |
551 'Buildbucket response: %s' % json_response) | |
552 # This is a case where the buildbucket job was triggered but never got | |
553 # scheduled on buildbot probably due to long pending job queue. | |
554 if (job_info.get('status') == 'COMPLETED' and | |
555 job_info.get('result') == 'CANCELED' and | |
556 job_info.get('cancelation_reason') == 'TIMEOUT'): | |
557 job.SetFailed() | |
558 raise UnexpectedJsonError('Try job timed out before it got scheduled. ' | |
559 'Buildbucket response: %s' % json_response) | |
560 | |
561 # This is a case where the buildbucket job failed due to invalid config. | |
562 if (job_info.get('status') == 'COMPLETED' and | |
563 job_info.get('result') == 'FAILURE' and | |
564 job_info.get('failure_reason') != 'BUILD_FAILURE'): | |
565 job.SetFailed() | |
566 job.key.delete() | |
567 raise UnexpectedJsonError('Invalid bisect configuration. ' | |
568 'Buildbucket response: %s' % json_response) | |
569 | |
570 if job_info.get('url') is None: | |
571 raise UnexpectedJsonError('No "url" in try job results. This could mean ' | |
572 'that the job has not started. ' | |
573 'Buildbucket response: %s' % json_response) | |
574 try: | |
575 result_details = json.loads(job_info['result_details_json']) | |
576 bisect_config = result_details['properties']['bisect_config'] | |
577 job_info['builder'] = bisect_config['recipe_tester_name'] | |
578 except (KeyError, ValueError, TypeError): | |
579 # If the tester name isn't found here, this is unexpected but non-fatal. | |
580 job_info['builder'] = 'Unknown' | |
581 logging.error('Failed to extract tester name from JSON: %s', json_response) | |
582 job_info['result'] = _BuildbucketStatusToStatusConstant( | |
583 job_info['status'], job_info['result']) | |
584 return job_info | |
585 | |
586 | |
587 def _ValidateRietveldResponse(response): | |
588 """Checks the response from Rietveld to see if the JSON format is right. | |
589 | |
590 Args: | |
591 response: A Response object, should have a string content attribute. | |
592 | |
593 Returns: | |
594 Try job info dict, guaranteed to have the keys "url" and "result". | |
595 | |
596 Raises: | |
597 UnexpectedJsonError: The format was not as expected. | |
598 """ | |
599 if not response: | |
600 raise UnexpectedJsonError('No response from Rietveld.') | |
601 try: | |
602 issue_data = json.loads(response.content) | |
603 except ValueError: | |
604 raise UnexpectedJsonError('Response from Rietveld could not be parsed ' | |
605 'as JSON: %s' % response.content) | |
606 # Check whether we can get the results from the issue data response. | |
607 if not issue_data.get('try_job_results'): | |
608 raise UnexpectedJsonError('Empty "try_job_results" in Rietveld response. ' | |
609 'Response: %s.' % response.content) | |
610 try_job_info = issue_data['try_job_results'][0] | |
611 if not try_job_info: | |
612 raise UnexpectedJsonError('Empty item in try job results. ' | |
613 'Rietveld response: %s' % response.content) | |
614 if try_job_info.get('result') is None: | |
615 raise UnexpectedJsonError('No "result" in try job results. ' | |
616 'Rietveld response: %s' % response.content) | |
617 if try_job_info.get('url') is None: | |
618 raise UnexpectedJsonError('No "url" in try job results. This could mean ' | |
619 'that the job has not started. ' | |
620 'Rietveld response: %s' % response.content) | |
621 return try_job_info | |
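Note: a quick self-check of the staleness cutoff in _IsStale; a bare stub stands in for the TryJob entity since only last_ran_timestamp is read. Sketch only, not part of the CL.

```python
import datetime


class _FakeJob(object):
  def __init__(self, last_ran_timestamp):
    self.last_ran_timestamp = last_ran_timestamp


_now = datetime.datetime.now()
assert not _IsStale(_FakeJob(None))                               # never ran
assert not _IsStale(_FakeJob(_now - datetime.timedelta(days=6)))  # still fresh
assert _IsStale(_FakeJob(_now - datetime.timedelta(days=8)))      # past 7 days
```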
622 | 211 |
623 | 212 |
624 def _MapAnomaliesToMergeIntoBug(dest_bug_id, source_bug_id): | 213 def _MapAnomaliesToMergeIntoBug(dest_bug_id, source_bug_id): |
625 """Maps anomalies from source bug to destination bug. | 214 """Maps anomalies from source bug to destination bug. |
626 | 215 |
627 Args: | 216 Args: |
628 dest_bug_id: Merge into bug (base bug) number. | 217 dest_bug_id: Merge into bug (base bug) number. |
629 source_bug_id: The bug to be merged. | 218 source_bug_id: The bug to be merged. |
630 """ | 219 """ |
631 query = anomaly.Anomaly.query( | 220 query = anomaly.Anomaly.query( |
632 anomaly.Anomaly.bug_id == int(source_bug_id)) | 221 anomaly.Anomaly.bug_id == int(source_bug_id)) |
633 anomalies = query.fetch() | 222 anomalies = query.fetch() |
634 for anomaly_entity in anomalies: | 223 for anomaly_entity in anomalies: |
635 anomaly_entity.bug_id = int(dest_bug_id) | 224 anomaly_entity.bug_id = int(dest_bug_id) |
636 ndb.put_multi(anomalies) | 225 ndb.put_multi(anomalies) |
637 | 226 |
638 | 227 |
639 def _CheckBisectBotForInfraFailure(bug_id, build_data, build_url): | 228 def _GetCommitHashCacheKey(results_data): |
640 """Logs bisect failures related to infrastructure. | |
641 | |
642 Args: | |
643 bug_id: Bug number. | |
644 build_data: A dictionary of build data for a bisect tryjob. | |
645 build_url: URL to a Buildbot bisect tryjob. | |
646 | |
647 TODO(chrisphan): Remove this once we get an idea of the rate of infra related | |
648 failures. | |
649 """ | |
650 build_steps = build_data['steps'] | |
651 | |
652 # If there's no bisect scripts step then it is considered infra issue. | |
653 slave_step_index = _GetBisectScriptStepIndex(build_steps) | |
654 if not slave_step_index: | |
655 _LogBisectInfraFailure(bug_id, 'Bot failure.', build_url) | |
656 return | |
657 | |
658 # Timeout failure is our problem. | |
659 run_time = build_data['times'][1] - build_data['times'][0] | |
660 run_time = int(run_time / 60) # Minutes. | |
661 if run_time >= _BISECT_BOT_TIMEOUT: | |
662 return | |
663 | |
664 # Any build failure is an infra issue. | |
665 # These flags are output by bisect_perf_regression.py. | |
666 build_failure_flags = [ | |
667 'Failed to build revision', | |
668 'Failed to produce build', | |
669 'Failed to perform pre-sync cleanup', | |
670 'Failed to sync', | |
671 'Failed to run [gclient runhooks]', | |
672 ] | |
673 slave_step = build_steps[slave_step_index] | |
674 stdio_url = ('%s/steps/%s/logs/stdio/text' % | |
675 (build_url, urllib.quote(slave_step['name']))) | |
676 response = _FetchURL(stdio_url) | |
677 if response: | |
678 for flag in build_failure_flags: | |
679 if flag in response.content: | |
680 _LogBisectInfraFailure(bug_id, 'Build failure.', build_url) | |
681 return | |
682 | |
683 | |
684 def _GetBisectScriptStepIndex(build_steps): | |
685 """Gets the index of step that run bisect script in build step data.""" | |
686 index = 0 | |
687 for step in build_steps: | |
688 if step['name'] in ['slave_steps', 'Running Bisection']: | |
689 return index | |
690 index += 1 | |
691 return None | |
692 | |
693 | |
694 def _LogBisectInfraFailure(bug_id, failure_message, stdio_url): | |
695 """Adds infrastructure related bisect failures to log.""" | |
696 comment = failure_message + '\n' | |
697 comment += ('<a href="https://chromeperf.appspot.com/group_report?' | |
698 'bug_id=%s">%s</a>\n' % (bug_id, bug_id)) | |
699 comment += 'Buildbot stdio: <a href="%s">%s</a>\n' % (stdio_url, stdio_url) | |
700 formatter = quick_logger.Formatter() | |
701 logger = quick_logger.QuickLogger('bisect_failures', 'infra', formatter) | |
702 logger.Log(comment) | |
703 logger.Save() | |
704 | |
705 | |
706 def _GetCommitHashCacheKey(results_output): | |
707 """Gets a commit hash cache key for the given bisect results output. | 229 """Gets a commit hash cache key for the given bisect results output. |
708 | 230 |
709 Args: | 231 Args: |
710 results_output: The bisect results output. | 232 results_data: Bisect results data. |
711 | 233 |
712 Returns: | 234 Returns: |
713 A string to use as a layered_cache key, or None if we don't want | 235 A string to use as a layered_cache key, or None if we don't want |
714 to merge any bugs based on this bisect result. | 236 to merge any bugs based on this bisect result. |
715 """ | 237 """ |
716 if not _BisectResultIsPositive(results_output): | 238 if results_data.get('culprit_data'): |
717 return None | 239 return _COMMIT_HASH_CACHE_KEY % results_data['culprit_data']['cl'] |
718 commits_list = re.findall(r'Commit : (.*)', results_output) | 240 return None |
719 if len(commits_list) != 1: | 241 |
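Note: for reference, the key the new helper produces for a culprit commit, and the None case when no culprit was found (the hash is illustrative):

```python
assert (_GetCommitHashCacheKey({'culprit_data': {'cl': 'a1b2c3d'}}) ==
        'commit_hash_a1b2c3d')
assert _GetCommitHashCacheKey({'status': 'completed'}) is None
```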
720 return None | 242 def _GetAuthorsToCC(results_data): |
721 return _COMMIT_HASH_CACHE_KEY % commits_list[0].strip() | |
722 | |
723 | |
724 def _BisectResultIsPositive(results_output): | |
725 """Returns True if the bisect found a culprit with high confidence.""" | |
726 return 'Status: Positive' in results_output | |
727 | |
728 | |
729 def _GetAuthorsToCC(results_output): | |
730 """Makes a list of email addresses that we want to CC on the bug. | 243 """Makes a list of email addresses that we want to CC on the bug. |
731 | 244 |
732 TODO(qyearsley): Make sure that the bisect result bot doesn't cc | 245 TODO(qyearsley): Make sure that the bisect result bot doesn't cc |
733 non-googlers on Restrict-View-Google bugs. This might be done by making | 246 non-googlers on Restrict-View-Google bugs. This might be done by making |
734 a request for labels for the bug (or by making a request for alerts in | 247 a request for labels for the bug (or by making a request for alerts in |
735 the datastore for the bug id and checking the internal-only property). | 248 the datastore for the bug id and checking the internal-only property). |
736 | 249 |
737 Args: | 250 Args: |
738 results_output: The bisect results output. | 251 results_data: Bisect results data. |
739 | 252 |
740 Returns: | 253 Returns: |
741 A list of email addresses, possibly empty. | 254 A list of email addresses, possibly empty. |
742 """ | 255 """ |
743 author_lines = re.findall(r'Author : (.*)', results_output) | 256 culprit_data = results_data.get('culprit_data') |
744 unique_emails = set() | 257 if not culprit_data: |
745 for line in author_lines: | 258 return [] |
746 parts = line.split(',') | 259 emails = [culprit_data['email']] or [] |
qyearsley 2016/01/26 18:43:27: This will always be equivalent to [culprit_data['email']].
chrisphan 2016/02/09 20:34:40: Woops. Done.
| |
747 unique_emails.update(p.strip() for p in parts if '@' in p) | 260 emails.extend(_GetReviewersFromCulpritData(culprit_data)) |
748 emails = sorted(unique_emails) | |
749 | |
750 # Avoid CCing issue to multiple authors when bisect finds multiple | |
751 # different authors for culprits CLs. | |
752 if len(emails) > 1: | |
753 emails = [] | |
754 if len(emails) == 1: | |
755 # In addition to the culprit CL author, we also want to add reviewers | |
756 # of the culprit CL to the cc list. | |
757 emails.extend(_GetReviewersFromBisectLog(results_output)) | |
758 return emails | 261 return emails |
759 | 262 |
760 | 263 |
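Note: following up on the review comment above, the `or []` on new line 259 can never take effect, since a one-element list is always truthy:

```python
assert (['author@chromium.org'] or []) == ['author@chromium.org']
assert ([None] or []) == [None]  # even a list holding None is truthy
```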
761 def _GetReviewersFromBisectLog(results_output): | 264 def _GetReviewersFromCulpritData(culprit_data): |
762 """Parse bisect log and gets reviewers email addresses from Rietveld issue. | 265 """Parse bisect log and gets reviewers email addresses from Rietveld issue. |
763 | 266 |
764 Note: This method doesn't get called when bisect reports multiple CLs by | 267 Note: This method doesn't get called when bisect reports multiple CLs by |
765 different authors, but will get called when there are multiple CLs by the | 268 different authors, but will get called when there are multiple CLs by the |
766 same owner. | 269 same owner. |
767 | 270 |
768 Args: | 271 Args: |
769 results_output: Bisect results output. | 272 culprit_data: Bisect results culprit data. |
770 | 273 |
771 Returns: | 274 Returns: |
772 List of email addresses from the committed CL. | 275 List of email addresses from the committed CL. |
773 """ | 276 """ |
277 | |
774 reviewer_list = [] | 278 reviewer_list = [] |
775 revisions_list = re.findall(r'Link : (.*)', results_output) | 279 revisions_links = culprit_data['revisions_links'] |
776 revisions_links = {rev.strip() for rev in revisions_list} | |
777 # Sometimes revision page content consists of multiple "Review URL" strings | 280 # Sometimes revision page content consists of multiple "Review URL" strings |
778 # due to reverted CLs; such CLs are prefixed with ">" symbols. | 281 # due to reverted CLs; such CLs are prefixed with ">" symbols. |
779 # Only parse the CL link corresponding to the revision found by the bisect. | 282 # Only parse the CL link corresponding to the revision found by the bisect. |
780 link_pattern = (r'(?<!>\s)Review URL: <a href=[\'"]' | 283 link_pattern = (r'(?<!>\s)Review URL: <a href=[\'"]' |
781 r'https://codereview.chromium.org/(\d+)[\'"].*>') | 284 r'https://codereview.chromium.org/(\d+)[\'"].*>') |
782 for link in revisions_links: | 285 for link in revisions_links: |
783 # Fetch the commit links in order to get codereview link | 286 # Fetch the commit links in order to get codereview link. |
784 response = _FetchURL(link) | 287 response = utils.FetchURL(link) |
785 if not response: | 288 if not response: |
786 continue | 289 continue |
787 rietveld_issue_ids = re.findall(link_pattern, response.content) | 290 rietveld_issue_ids = re.findall(link_pattern, response.content) |
788 for issue_id in rietveld_issue_ids: | 291 for issue_id in rietveld_issue_ids: |
789 # Fetch codereview link, and get reviewer email addresses from the | 292 # Fetch codereview link, and get reviewer email addresses from the |
790 # response JSON. | 293 # response JSON. |
791 issue_response = _FetchURL( | 294 issue_response = utils.FetchURL( |
792 'https://codereview.chromium.org/api/%s' % issue_id) | 295 'https://codereview.chromium.org/api/%s' % issue_id) |
793 if not issue_response: | 296 if not issue_response: |
794 continue | 297 continue |
795 issue_data = json.loads(issue_response.content) | 298 issue_data = json.loads(issue_response.content) |
796 reviewer_list.extend([str(item) for item in issue_data['reviewers']]) | 299 reviewer_list.extend([str(item) for item in issue_data['reviewers']]) |
797 return reviewer_list | 300 return reviewer_list |
798 | 301 |
799 | 302 |
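Note: to illustrate the negative lookbehind in link_pattern above: a reverted CL's "Review URL" line is quoted with "> " on the revision page and must not be picked up. The snippets and issue number below are fabricated.

```python
import re

_LINK_PATTERN = (r'(?<!>\s)Review URL: <a href=[\'"]'
                 r'https://codereview.chromium.org/(\d+)[\'"].*>')

_ORIGINAL = ('Review URL: <a href="https://codereview.chromium.org/'
             '1234563002">codereview.chromium.org/1234563002</a>')
_REVERTED = ('> Review URL: <a href="https://codereview.chromium.org/'
             '1234563002">codereview.chromium.org/1234563002</a>')

assert re.findall(_LINK_PATTERN, _ORIGINAL) == ['1234563002']
assert re.findall(_LINK_PATTERN, _REVERTED) == []  # skipped by the lookbehind
```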
800 def _BeautifyContent(response_data): | 303 def _SendFYIBisectEmail(job, message): |
801 """Strip lines begins with @@@ and strip leading and trailing whitespace.""" | |
802 pattern = re.compile(r'@@@.*@@@.*\n') | |
803 response_str = re.sub(pattern, '', response_data) | |
804 new_response = [line.strip() for line in response_str.split('\n')] | |
805 response_str = '\n'.join(new_response) | |
806 | |
807 delimiter = '---bisect results start here---' | |
808 if delimiter in response_str: | |
809 response_str = response_str.split(delimiter)[1] | |
810 | |
811 return response_str.rstrip() | |
812 | |
813 | |
814 def _FetchURL(request_url, skip_status_code=False): | |
815 """Wrapper around URL fetch service to make request. | |
816 | |
817 Args: | |
818 request_url: URL of request. | |
819 skip_status_code: Skips return code check when True, default is False. | |
820 | |
821 Returns: | |
822 Response object return by URL fetch, otherwise None when there's an error. | |
823 """ | |
824 logging.info('URL being fetched: ' + request_url) | |
825 try: | |
826 response = urlfetch.fetch(request_url) | |
827 except urlfetch_errors.DeadlineExceededError: | |
828 logging.error('Deadline exceeded error checking %s', request_url) | |
829 return None | |
830 except urlfetch_errors.DownloadError as err: | |
831 # DownloadError is raised to indicate a non-specific failure when there | |
832 # was not a 4xx or 5xx status code. | |
833 logging.error(err) | |
834 return None | |
835 if skip_status_code: | |
836 return response | |
837 elif response.status_code != 200: | |
838 logging.error( | |
839 'ERROR %s checking %s', response.status_code, request_url) | |
840 return None | |
841 return response | |
842 | |
843 | |
844 def _FetchRietveldIssueJSON(job): | |
845 server = rietveld_service.RietveldService(internal_only=job.internal_only) | |
846 path = 'api/%d/%d' % (job.rietveld_issue_id, job.rietveld_patchset_id) | |
847 return server.MakeRequest(path, method='GET') | |
848 | |
849 | |
850 def _RietveldIssueURL(job): | |
851 config = rietveld_service.GetDefaultRietveldConfig() | |
852 host = config.internal_server_url if job.internal_only else config.server_url | |
853 return '%s/%d' % (host, job.rietveld_issue_id) | |
854 | |
855 | |
856 def _BuildbucketStatusToStatusConstant(status, result): | |
857 """Converts the string status from buildbucket to a numeric constant.""" | |
858 # TODO(robertocn): We might need to make a difference between | |
859 # - Scheduled and Started | |
860 # - Failure and Cancelled. | |
861 if status == 'COMPLETED': | |
862 if result == 'SUCCESS': | |
863 return SUCCESS | |
864 return FAILURE | |
865 return STARTED | |
866 | |
867 | |
868 def _CheckFYIBisectJob(job, issue_tracker): | |
869 bisect_results = _GetBisectResults(job) | |
870 if not bisect_results: | |
871 logging.info('Bisect FYI: [%s] No bisect results, job might be pending.', | |
872 job.job_name) | |
873 return | |
874 logging.info('Bisect FYI: [%s] Bisect job status: %s.', | |
875 job.job_name, bisect_results['status']) | |
876 try: | |
877 if bisect_results['status'] == 'Completed': | |
878 _PostSucessfulResult(job, bisect_results, issue_tracker) | |
879 # Below in VerifyBisectFYIResults we verify whether the actual | |
880 # results matches with the expectations; if they don't match then | |
881 # bisect_results['status'] gets set to 'Failure'. | |
882 bisect_fyi.VerifyBisectFYIResults(job, bisect_results) | |
883 # Verify whether the issue is updated with bisect results, if not | |
884 # then mark the results status='Failure'. | |
885 bisect_fyi.VerifyBugUpdate(job, issue_tracker, bisect_results) | |
886 elif 'Failure' in bisect_results['status']: | |
887 _PostFailedResult( | |
888 job, bisect_results, issue_tracker, add_bug_comment=True) | |
889 bisect_results['errors'] = 'Bisect FYI job failed:\n%s' % bisect_results | |
890 except BugUpdateFailure as e: | |
891 bisect_results['status'] = 'Failure' | |
892 bisect_results['error'] = 'Bug update Failed: %s' % e | |
893 finally: | |
894 _SendFYIBisectEmail(job, bisect_results) | |
895 job.key.delete() | |
896 | |
897 | |
898 def _SendFYIBisectEmail(job, results): | |
899 """Sends an email to auto-bisect-team about FYI bisect results.""" | 304 """Sends an email to auto-bisect-team about FYI bisect results.""" |
900 # Don't send email when the test case passes. | 305 email_data = email_template.GetBisectFYITryJobEmailReport(job, message) |
901 if results.get('status') == 'Completed': | |
902 logging.info('Test Passed: %s.\n Results: %s', job.job_name, results) | |
903 return | |
904 | |
905 email_data = email_template.GetBisectFYITryJobEmail(job, results) | |
906 if not email_data: | |
907 logging.error('Failed to create "email_data" from results for %s.\n' | |
908 ' Results: %s', job.job_name, results) | |
909 return | |
910 mail.send_mail(sender='gasper-alerts@google.com', | 306 mail.send_mail(sender='gasper-alerts@google.com', |
911 to='auto-bisect-team@google.com', | 307 to='auto-bisect-team@google.com', |
912 subject=email_data['subject'], | 308 subject=email_data['subject'], |
913 body=email_data['body'], | 309 body=email_data['body'], |
914 html=email_data['html']) | 310 html=email_data['html']) |
311 | |
312 | |
313 def UpdateQuickLog(job): | |
314 report = bisect_report.GetReport(job) | |
315 if not report: | |
316 logging.error('Bisect report returns empty for job id: %s', job.id) | |
317 return | |
318 formatter = quick_logger.Formatter() | |
319 logger = quick_logger.QuickLogger('bisect_result', job.bug_id, formatter) | |
320 logger.Log(report) | |
321 logger.Save() | |