Chromium Code Reviews
chromiumcodereview-hr@appspot.gserviceaccount.com (chromiumcodereview-hr) | Please choose your nickname with Settings | Help | Chromium Project | Gerrit Changes | Sign out
(212)

Side by Side Diff: appengine/findit/handlers/handlers_util.py

Issue 1827903002: [Findit] Modify handlers_util to prepare for the new UI change. (Closed) Base URL: https://chromium.googlesource.com/infra/infra.git@master
Patch Set: Add flaky info to try job result. Created 4 years, 9 months ago
Use n/p to move between diff chunks; N/P to move between comments. Draft comments are only viewable by you.
Jump to:
View unified diff | Download patch
« no previous file with comments | « no previous file | appengine/findit/handlers/swarming_task.py » ('j') | no next file with comments »
Toggle Intra-line Diffs ('i') | Expand Comments ('e') | Collapse Comments ('c') | Show Comments Hide Comments ('s')
OLDNEW
1 # Copyright 2016 The Chromium Authors. All rights reserved. 1 # Copyright 2016 The Chromium Authors. All rights reserved.
2 # Use of this source code is governed by a BSD-style license that can be 2 # Use of this source code is governed by a BSD-style license that can be
3 # found in the LICENSE file. 3 # found in the LICENSE file.
4 4
5 from collections import defaultdict 5 from collections import defaultdict
6 import copy 6 import copy
7 7
8 from model import wf_analysis_status 8 from model import wf_analysis_status
9 from model.wf_analysis import WfAnalysis 9 from model.wf_analysis import WfAnalysis
10 from model.wf_analysis_status import SWARMING_STATUS_TO_DESCRIPTION
11 from model.wf_analysis_status import TRY_JOB_STATUS_TO_DESCRIPTION
10 from model.wf_swarming_task import WfSwarmingTask 12 from model.wf_swarming_task import WfSwarmingTask
11 from model.wf_try_job import WfTryJob 13 from model.wf_try_job import WfTryJob
12 from waterfall import buildbot 14 from waterfall import buildbot
13 from waterfall import waterfall_config 15 from waterfall import waterfall_config
14 16
15 17
16 FLAKY = 'Flaky' 18 FLAKY = 'Flaky'
19 NO_SWARMING_TASK_FOUND = 'No swarming rerun found.'
stgao 2016/03/24 17:59:48 When will we have this case? How about adding som
chanli 2016/03/24 19:53:01 This would happen for example we delete that task
20 NON_SWARMING_NO_RERUN = 'No swarming rerun for non-swarming tests.'
21 NO_TRY_JOB_REASON_MAP = {
22 NO_SWARMING_TASK_FOUND: 'No swarming rerun found, hence no try job.',
23 NON_SWARMING_NO_RERUN: (
24 'No swarming rerun nor try job for non-swarming tests.'),
25 SWARMING_STATUS_TO_DESCRIPTION[wf_analysis_status.PENDING]: (
26 'Swarming rerun is %s.' % (
27 SWARMING_STATUS_TO_DESCRIPTION[wf_analysis_status.PENDING])),
28 SWARMING_STATUS_TO_DESCRIPTION[wf_analysis_status.ANALYZING]: (
29 'Swarming rerun is %s.' % (
30 SWARMING_STATUS_TO_DESCRIPTION[wf_analysis_status.ANALYZING])),
31 SWARMING_STATUS_TO_DESCRIPTION[wf_analysis_status.ERROR]: (
32 'Swarming rerun has %s.' % (
33 SWARMING_STATUS_TO_DESCRIPTION[wf_analysis_status.ERROR])),
stgao 2016/03/24 17:59:48 Should we do this conversion on the UI side instea
chanli 2016/03/24 19:53:01 Done.
34 }
35
36 def _GetFailureResultMap(master_name, builder_name, build_number):
37 analysis = WfAnalysis.Get(master_name, builder_name, build_number)
38 if not analysis:
39 return None
40
41 return analysis.failure_result_map
17 42
18 43
19 def GenerateSwarmingTasksData(master_name, builder_name, build_number): 44
lijeffrey 2016/03/28 23:35:03 nit: too many blank lines
45 def _GetAllTestsForASwarmingTask(task_key, step_failure_result_map):
46 all_tests = set()
47 for test_name, test_task_key in step_failure_result_map.iteritems():
48 if task_key == test_task_key:
49 all_tests.add(test_name)
50 return list(all_tests)
51
52
53 def _GenerateSwarmingTasksData(failure_result_map):
20 """Collects info for all related swarming tasks. 54 """Collects info for all related swarming tasks.
21 55
22 Returns: A dict as below: 56 Returns: A dict as below:
23 { 57 {
24 'step1': { 58 'step1': {
25 'swarming_tasks': [ 59 'swarming_tasks': {
26 { 60 'm/b/121': {
27 'status': 'Completed', 61 'task_info': {
28 'task_id': 'task1', 62 'status': 'Completed',
29 'task_url': ( 63 'task_id': 'task1',
30 'https://chromium-swarm.appspot.com/user/task/task1'), 64 'task_url': ('https://chromium-swarm.appspot.com/user'
31 'tests': ['test2'] 65 '/task/task1')
32 }, 66 },
33 { 67 'all_tests': ['test2', 'test3', 'test4'],
34 'status': 'Completed', 68 'reliable_tests': ['test2'],
35 'task_id': 'task0', 69 'flaky_tests': ['test3', 'test4']
36 'task_url': (
37 'https://chromium-swarm.appspot.com/user/task/task0'),
38 'tests': ['test1']
39 }
40 ],
41 'tests': {
42 'test1': {
43 'status': 'Completed',
44 'task_id': 'task0',
45 'task_url': (
46 'https://chromium-swarm.appspot.com/user/task/task0')
47 },
48 'test2': {
49 'status': 'Completed',
50 'task_id': 'task1',
51 'task_url': (
52 'https://chromium-swarm.appspot.com/user/task/task1')
53 } 70 }
54 } 71 }
55 }, 72 },
56 'step2': { 73 'step2': {
57 'swarming_tasks': [ 74 'swarming_tasks': {
58 { 75 'm/b/121': {
59 'status': 'Pending' 76 'task_info': {
77 'status': 'Pending'
78 },
79 'all_tests': ['test1']
60 } 80 }
61 ], 81 }
62 'tests': { 82 },
63 'test1': { 83 'step3': {
64 'status': 'Pending' 84 'swarming_tasks': {
85 'm/b/121': {
86 'task_info': {
87 'status': 'No swarming rerun found'
stgao 2016/03/24 17:59:48 Alternative: for underlying data, use enum to indi
chanli 2016/03/24 19:53:01 Done.
88 },
89 'all_tests': ['test1']
65 } 90 }
66 } 91 }
67 } 92 }
68 } 93 }
69 """ 94 """
70 tasks_info = defaultdict(dict) 95 tasks_info = defaultdict(lambda: defaultdict(lambda: defaultdict(dict)))
71 96
72 analysis = WfAnalysis.Get(master_name, builder_name, build_number)
73 if not analysis:
74 return tasks_info
75
76 failure_result_map = analysis.failure_result_map
77 if failure_result_map: 97 if failure_result_map:
78 for step_name, failure in failure_result_map.iteritems(): 98 for step_name, failure in failure_result_map.iteritems():
99 step_tasks_info = tasks_info[step_name]['swarming_tasks']
79 if isinstance(failure, dict): 100 if isinstance(failure, dict):
80 # Only trigger swarming task for swarming test failures. 101 # Only swarming test failures have swarming re-runs.
81 key_test_map = defaultdict(list) 102 swarming_task_keys = set()
82 for test_name, first_failure_key in failure.iteritems(): 103 for first_failure_key in failure.values():
83 key_test_map[first_failure_key].append(test_name) 104 swarming_task_keys.add(first_failure_key)
84 105
85 tasks_info[step_name]['swarming_tasks'] = [] 106 for key in swarming_task_keys:
86 tasks_info[step_name]['tests'] = defaultdict(dict) 107 task_dict = step_tasks_info[key]
87 step_tasks_info = tasks_info[step_name]['swarming_tasks']
88 tests = tasks_info[step_name]['tests']
89 for key, test_names in key_test_map.iteritems():
90 referred_build_keys = key.split('/') 108 referred_build_keys = key.split('/')
91 task = WfSwarmingTask.Get(*referred_build_keys, step_name=step_name) 109 task = WfSwarmingTask.Get(*referred_build_keys, step_name=step_name)
92 if not task: 110 if not task:
93 continue 111 task_info = {
94 task_info = { 112 'status': NO_SWARMING_TASK_FOUND
95 'status': wf_analysis_status.SWARMING_STATUS_TO_DESCRIPTION.get( 113 }
96 task.status) 114 task_dict['all_tests'] = _GetAllTestsForASwarmingTask(key, failure)
97 } 115 else:
98 if task.task_id: 116 task_info = {
99 task_info['task_id'] = task.task_id 117 'status': SWARMING_STATUS_TO_DESCRIPTION.get(task.status)
100 task_info['task_url'] = 'https://%s/user/task/%s' % ( 118 }
101 waterfall_config.GetSwarmingSettings()['server_host'],
102 task.task_id)
103 119
104 for test_name in test_names: 120 task_dict['all_tests'] = (
105 tests[test_name] = copy.deepcopy(task_info) 121 _GetAllTestsForASwarmingTask(key, failure)
122 if not task.parameters or
123 not task.parameters.get('tests') else task.parameters['tests'])
124 task_dict['ref_name'] = (
125 step_name.split()[0]
stgao 2016/03/24 17:59:48 mind a comment why this split? This could make the
chanli 2016/03/24 19:53:01 Done.
126 if not task.parameters or not task.parameters.get('ref_name')
127 else task.parameters['ref_name'])
106 128
107 task_info['tests'] = test_names 129 if task.task_id:
108 step_tasks_info.append(task_info) 130 task_info['task_id'] = task.task_id
131 task_info['task_url'] = 'https://%s/user/task/%s' % (
132 waterfall_config.GetSwarmingSettings()['server_host'],
stgao 2016/03/24 17:59:48 Will this cause too many reads against datastore o
chanli 2016/03/24 19:53:01 Done.
133 task.task_id)
134 if task.classified_tests:
stgao 2016/03/24 17:59:48 When we don't have such info? And how should we de
chanli 2016/03/24 19:53:01 Basically if the task is not completed yet, we wil
135 task_dict['reliable_tests'] = task.classified_tests.get(
136 'reliable_tests', [])
137 task_dict['flaky_tests'] = task.classified_tests.get(
138 'flaky_tests', [])
139
140 task_dict['task_info'] = task_info
141 else:
142 step_tasks_info[failure] = {
143 'task_info': {
144 'status': NON_SWARMING_NO_RERUN
145 }
146 }
109 147
110 return tasks_info 148 return tasks_info
111 149
112 150
151 def GetSwarmingTaskInfo(master_name, builder_name, build_number):
152 failure_result_map = _GetFailureResultMap(
153 master_name, builder_name, build_number)
lijeffrey 2016/03/28 23:35:03 nit: 4 spaces
154 return _GenerateSwarmingTasksData(failure_result_map)
155
156
113 def _GetTryJobBuildNumber(url): 157 def _GetTryJobBuildNumber(url):
114 build_keys = buildbot.ParseBuildUrl(url) 158 build_keys = buildbot.ParseBuildUrl(url)
115 return build_keys[2] 159 return build_keys[2]
116 160
117 161
118 def _GetCulpritInfoForTryJobResult(try_job_key, culprits_info): 162 def _OrganizeTryJobResultByCulprits(try_job_culprits):
163 """Re-organize try job culprits by revision.
164
165 Args:
166 try_job_culprits (dict): A dict of culprits for one step organized by test:
167 {
168 'tests': {
169 'a_test1': {
170 'revision': 'rev1',
171 'commit_position': '1',
172 'review_url': 'url_1'
173 },
174 'a_test2': {
175 'revision': 'rev1',
176 'commit_position': '1',
177 'review_url': 'url_1'
178 }
179 }
180 }
181 Returns:
 182 A dict of culprits for one step organized by revision:
183 {
184 'rev1': {
185 'revision': 'rev1',
186 'commit_position': '1',
187 'review_url': 'url_1',
188 'tests': ['a_test1', 'a_test2']
189 }
190 }
191 """
192 if not try_job_culprits or not try_job_culprits.get('tests'):
193 return {}
194
195 organized_culprits = {}
196 for test_name, culprit in try_job_culprits['tests'].iteritems():
197 revision = culprit['revision']
198 if organized_culprits.get(revision):
199 organized_culprits[revision]['failed_tests'].append(test_name)
200 else:
201 organized_culprits[revision] = culprit
202 organized_culprits[revision]['failed_tests'] = [test_name]
203
204 return organized_culprits
205
206
207 def _GetCulpritInfoForTryJobResultForTest(try_job_key, culprits_info):
119 referred_build_keys = try_job_key.split('/') 208 referred_build_keys = try_job_key.split('/')
120 try_job = WfTryJob.Get(*referred_build_keys) 209 try_job = WfTryJob.Get(*referred_build_keys)
121 if not try_job: 210 if not try_job or try_job.compile_results:
122 return 211 return
123 212
124 if try_job.compile_results: 213 try_job_result = try_job.test_results[-1] if try_job.test_results else None
125 try_job_result = try_job.compile_results[-1] 214
126 elif try_job.test_results: 215 for step_try_jobs in culprits_info.values():
127 try_job_result = try_job.test_results[-1] 216 additional_tests_culprit_info = []
128 else: 217 for try_job_info in step_try_jobs['try_jobs']:
129 try_job_result = None 218 if try_job_key != try_job_info['try_job_key']:
130 219 continue
131 additional_tests_culprit_info = {} 220
132 for culprit_info in culprits_info.values(): 221 if try_job_info.get('status'):
133 if culprit_info['try_job_key'] != try_job_key: 222 continue
134 continue 223 try_job_info['status'] = TRY_JOB_STATUS_TO_DESCRIPTION[try_job.status]
135 224 ref_name = try_job_info['ref_name']
136 # Only include try job result for reliable tests. 225 if try_job_result:
137 # Flaky tests have been marked as 'Flaky'. 226 # Saves try job information.
138 culprit_info['status'] = ( 227 try_job_info['try_job_url'] = try_job_result['url']
139 wf_analysis_status.TRY_JOB_STATUS_TO_DESCRIPTION[try_job.status] 228 try_job_info['try_job_build_number'] = (
140 if not culprit_info.get('status') else culprit_info['status'])
141
142 if try_job_result and culprit_info['status'] != FLAKY:
143 if try_job_result.get('url'):
144 culprit_info['try_job_url'] = try_job_result['url']
145 culprit_info['try_job_build_number'] = (
146 _GetTryJobBuildNumber(try_job_result['url'])) 229 _GetTryJobBuildNumber(try_job_result['url']))
147 if try_job_result.get('culprit'): 230 if (try_job_result.get('culprit') and
148 try_job_culprits = try_job_result['culprit'] 231 try_job_result['culprit'].get(ref_name)):
149 step = culprit_info.get('step_no_platform', culprit_info['step_name']) 232 # Saves try job culprits information.
150 test = culprit_info['test_name'] 233 culprit_tests_map = _OrganizeTryJobResultByCulprits(
151 234 try_job_result['culprit'][ref_name])
152 if test == 'N/A': # Only step level. 235
153 if try_job_culprits.get(step, {}).get('tests'): 236 all_tests = try_job_info['all_tests']
154 # try job results has specified tests. 237 list_of_culprits = []
155 step_culprits = try_job_culprits[step]['tests'] 238 for culprit_info in culprit_tests_map.values():
156 for test_name, try_job_culprit in step_culprits.iteritems(): 239 failed_tests = culprit_info['failed_tests']
157 additional_test_key = '%s-%s' % (step, test_name) 240 list_of_culprits.append(culprit_info)
158 additional_tests_culprit_info[additional_test_key] = { 241 # Get the left over tests
159 'step_name': step, 242 all_tests = list(
160 'test_name': test_name, 243 set(all_tests) ^ set(failed_tests) & set(all_tests))
161 'try_job_key': try_job_key, 244 if not all_tests:
162 'status': culprit_info['status'], 245 break
163 'try_job_url': culprit_info['try_job_url'], 246
164 'try_job_build_number': culprit_info['try_job_build_number'], 247 index_start = 1
165 'revision': try_job_culprit.get('revision'), 248 if all_tests:
 166 'commit_position': try_job_culprit.get('commit_position'), 249 # There are tests that don't have try job culprits.
167 'review_url': try_job_culprit.get('review_url') 250 # Group these tests together.
168 } 251 # Save them in current try_job_info.
169 continue 252 try_job_info['failed_tests'] = all_tests
253 # Saves all the culprits later.
254 index_start = 0
170 else: 255 else:
171 # For historical culprit found by try job for compile, 256 # Saves the first culprit in current try_job_info.
172 # step name is not recorded. 257 # Saves all the other culprits later.
173 culprit = try_job_culprits.get(step) or try_job_culprits 258 try_job_info['culprit'] = {
174 elif test in try_job_culprits.get(step, {}).get('tests'): 259 'revision': list_of_culprits[0]['revision'],
175 culprit = try_job_culprits[step]['tests'][test] 260 'commit_position': list_of_culprits[0]['commit_position'],
176 else: # pragma: no cover 261 'review_url': list_of_culprits[0]['review_url']
177 continue # No culprit for test found. 262 }
178 263 try_job_info['failed_tests'] = list_of_culprits[0]['failed_tests']
179 culprit_info['revision'] = culprit.get('revision') 264
180 culprit_info['commit_position'] = culprit.get('commit_position') 265 for n in xrange(index_start, len(list_of_culprits)):
181 culprit_info['review_url'] = culprit.get('review_url') 266 iterate_culprit = list_of_culprits[n]
182 267 tmp_try_job_info = copy.deepcopy(try_job_info)
183 if additional_tests_culprit_info: 268 tmp_try_job_info['culprit'] = {
184 for key, test_culprit_info in additional_tests_culprit_info.iteritems(): 269 'revision': iterate_culprit['revision'],
185 culprits_info.pop(test_culprit_info['step_name'], None) 270 'commit_position': iterate_culprit['commit_position'],
186 culprits_info[key] = test_culprit_info 271 'review_url': iterate_culprit['review_url']
187 272 }
188 273 tmp_try_job_info['failed_tests'] = iterate_culprit['failed_tests']
189 def _UpdateTryJobCulpritUsingSwarmingTask( 274 additional_tests_culprit_info.append(tmp_try_job_info)
190 step_name, failure_key_set, culprits_info): 275
191 for failure_key in failure_key_set: 276 if additional_tests_culprit_info:
192 build_keys = failure_key.split('/') 277 step_try_jobs['try_jobs'].extend(additional_tests_culprit_info)
193 task = WfSwarmingTask.Get(*build_keys, step_name=step_name) 278
194 if not task: 279
195 continue 280
196 classified_tests = task.classified_tests 281 def _UpdateTryJobInfoBasedOnSwarming(step_tasks_info, try_jobs):
197 step_no_platform = task.parameters.get( 282 """
198 'ref_name', step_name.split()[0]) 283 Args:
199 for culprit_info in culprits_info.values(): 284 step_tasks_info (dict): A dict of swarming task info for this step.
200 if (culprit_info['try_job_key'] == failure_key and 285 It is the result from _GenerateSwarmingTasksData.
201 step_name == culprit_info['step_name']): 286 try_jobs (list): A list to save try job data for the step, format as below:
202 culprit_info['step_no_platform'] = step_no_platform 287 [
203 if culprit_info['test_name'] in classified_tests.get('flaky_tests', []): 288 {
204 culprit_info['status'] = FLAKY 289 'try_job_key': 'm/b/120'
290 },
291 {
292 'try_job_key': 'm/b/121'
293 },
294 ...
295 ]
296 """
297 additional_flakiness_list = []
298 for try_job in try_jobs:
299 try_job_key = try_job['try_job_key']
300 for swarming_task_key, task in step_tasks_info.get(
301 'swarming_tasks', {}).iteritems():
302 if swarming_task_key != try_job_key:
303 continue
304
305 if task['task_info']['status'] != SWARMING_STATUS_TO_DESCRIPTION.get(
306 wf_analysis_status.ANALYZED):
 307 # There is something wrong with the swarming task or it's not done yet,
308 # no try job yet.
309 try_job['status'] = NO_TRY_JOB_REASON_MAP.get(
310 task['task_info']['status'], 'No try job')
311 else:
312 try_job['ref_name'] = task['ref_name']
313 if task.get('reliable_tests'):
314 try_job['all_tests'] = task['reliable_tests']
315 elif task.get('flaky_tests'): # pragma: no cover
316 # All Flaky.
317 try_job['status'] = FLAKY
318 try_job['all_tests'] = task['flaky_tests']
319
320 if task['task_info'].get('task_id'):
321 task_info_str = '<a href="%s">%s</a>' %(
322 task['task_info']['task_url'], task['task_info']['task_id'])
323 if try_job.get('status'):
324 try_job['status'] = '%s Swarming rerun info: %s'% (
325 try_job['status'], task_info_str)
326
327 if task.get('flaky_tests') and task.get('reliable_tests'):
328 # Split this try job into two: flaky group and reliable group.
329 flaky_try_job = copy.deepcopy(try_job)
330 flaky_try_job['status'] = '%s Swarming rerun info: %s'% (
331 FLAKY, task_info_str)
332 flaky_try_job['all_tests'] = task['flaky_tests']
333 additional_flakiness_list.append(flaky_try_job)
334
335 if additional_flakiness_list:
336 try_jobs.extend(additional_flakiness_list)
337 print try_jobs
338
339
340 def _GetAllTryJobResultsForTest(failure_result_map, tasks_info):
341 culprits_info = defaultdict(lambda: defaultdict(list))
342 if not tasks_info:
343 return culprits_info
344
345 try_job_keys = set()
346 for step_name, step_failure_result_map in failure_result_map.iteritems():
347 try_jobs = culprits_info[step_name]['try_jobs']
348
349 if isinstance(step_failure_result_map, dict):
350 for try_job_key in step_failure_result_map.values():
351 if try_job_key not in try_job_keys:
352 try_job_dict = {
353 'try_job_key': try_job_key
354 }
355 try_jobs.append(try_job_dict)
356 try_job_keys.add(try_job_key)
357 else:
358 # Try job should only be triggered for swarming tests, because we cannot
359 # identify flaky tests for non-swarming tests.
360 try_job_dict = {
361 'try_job_key': step_failure_result_map
362 }
363 try_jobs.append(try_job_dict)
364
365 _UpdateTryJobInfoBasedOnSwarming(tasks_info[step_name], try_jobs)
366
367 for try_job_key in try_job_keys:
368 _GetCulpritInfoForTryJobResultForTest(try_job_key, culprits_info)
369
370 return culprits_info
371
372
373 def _GetTryJobResultForCompile(failure_result_map):
374 try_job_key = failure_result_map['compile']
375 referred_build_keys = try_job_key.split('/')
376 culprit_info = defaultdict(list)
377
378 try_job = WfTryJob.Get(*referred_build_keys)
379 if not try_job or try_job.test_results:
380 return culprit_info
381
382 try_job_result = (
383 try_job.compile_results[-1] if try_job.compile_results else None)
384
385 compile_try_job = {
386 'try_job_key': try_job_key,
387 'status': TRY_JOB_STATUS_TO_DESCRIPTION[try_job.status]
388 }
389
390 if try_job_result:
391 if try_job_result.get('url'):
392 compile_try_job['try_job_url'] = try_job_result['url']
393 compile_try_job['try_job_build_number'] = (
394 _GetTryJobBuildNumber(try_job_result['url']))
395 if try_job_result.get('culprit', {}).get('compile'):
396 compile_try_job['culprit'] = try_job_result['culprit']['compile']
397
398 culprit_info['compile'].append(compile_try_job)
399 return culprit_info
205 400
206 401
207 def GetAllTryJobResults(master_name, builder_name, build_number): 402 def GetAllTryJobResults(master_name, builder_name, build_number):
208 culprits_info = {} 403 culprits_info = {}
209 try_job_keys = set() 404 is_test_failure = True
210 405
211 analysis = WfAnalysis.Get(master_name, builder_name, build_number) 406 failure_result_map = _GetFailureResultMap(
212 if not analysis: 407 master_name, builder_name, build_number)
213 return culprits_info 408
214
215 failure_result_map = analysis.failure_result_map
216 if failure_result_map: 409 if failure_result_map:
217 # failure_result_map uses step_names as keys and saves referred try_job_keys 410 for step_name in failure_result_map:
218 # If non-swarming, step_name and referred_try_job_key match directly as: 411 if step_name == 'compile':
219 # step_name: try_job_key 412 is_test_failure = False
220 # If swarming, add one more layer of tests, so the format would be: 413 break
221 # step_name: { 414 if is_test_failure:
222 # test_name1: try_job_key1, 415 tasks_info = _GenerateSwarmingTasksData(failure_result_map)
223 # test_name2: try_job_key2, 416 culprits_info = _GetAllTryJobResultsForTest(
224 # ... 417 failure_result_map, tasks_info)
225 # } 418 else:
226 for step_name, step_failure_result_map in failure_result_map.iteritems(): 419 culprits_info = _GetTryJobResultForCompile(failure_result_map)
227 if isinstance(step_failure_result_map, dict):
228 step_refering_keys = set()
229 for failed_test, try_job_key in step_failure_result_map.iteritems():
230 step_test_key = '%s-%s' % (step_name, failed_test)
231 culprits_info[step_test_key] = {
232 'step_name': step_name,
233 'test_name': failed_test,
234 'try_job_key': try_job_key
235 }
236 step_refering_keys.add(try_job_key)
237
238 _UpdateTryJobCulpritUsingSwarmingTask(
239 step_name, step_refering_keys, culprits_info)
240 try_job_keys.update(step_refering_keys)
241 else:
242 culprits_info[step_name] = {
243 'step_name': step_name,
244 'test_name': 'N/A',
245 'try_job_key': step_failure_result_map
246 }
247 try_job_keys.add(step_failure_result_map)
248
249 for try_job_key in try_job_keys:
250 _GetCulpritInfoForTryJobResult(try_job_key, culprits_info)
251 420
252 return culprits_info 421 return culprits_info
OLDNEW
« no previous file with comments | « no previous file | appengine/findit/handlers/swarming_task.py » ('j') | no next file with comments »

Powered by Google App Engine
This is Rietveld 408576698