Chromium Code Reviews

Side by Side Diff: appengine/findit/handlers/handlers_util.py

Issue 1827903002: [Findit] Modify handlers_util to prepare for the new UI change. (Closed) Base URL: https://chromium.googlesource.com/infra/infra.git@master
Patch Set: Modify data format for compile failure Created 4 years, 9 months ago
1 # Copyright 2016 The Chromium Authors. All rights reserved.
2 # Use of this source code is governed by a BSD-style license that can be
3 # found in the LICENSE file.
4
5 from collections import defaultdict
6 import copy
7
8 from model import wf_analysis_status
9 from model.wf_analysis import WfAnalysis
10 from model.wf_swarming_task import WfSwarmingTask
11 from model.wf_try_job import WfTryJob
12 from waterfall import buildbot
13 from waterfall import waterfall_config
14
15
16 FLAKY = 'Flaky' 16 FLAKY = 200
stgao 2016/03/25 22:50:23 How about creating a message module for these? FL
chanli 2016/03/25 23:22:03 Per our previous discussion, the status here are a
17
18 # Additional status for swarming tasks.
19 NO_SWARMING_TASK_FOUND = 110
20 NON_SWARMING_NO_RERUN = 120
21
22 # Additional status for try jobs.
23 NO_TRY_JOB_REASON_MAP = {
24 NO_SWARMING_TASK_FOUND: NO_SWARMING_TASK_FOUND,
25 NON_SWARMING_NO_RERUN: NON_SWARMING_NO_RERUN,
26 wf_analysis_status.PENDING: 130,
27 wf_analysis_status.ANALYZING: 140,
28 wf_analysis_status.ERROR: 150,
29 }
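
These numeric codes are additional statuses layered on top of the wf_analysis_status codes, so the UI can tell apart the different "no try job" situations. A message module along the lines of stgao's suggestion could map each code to display text; a minimal sketch (the module layout and the message strings are illustrative, not part of this CL):

    RESULT_STATUS_TO_MESSAGE = {
        FLAKY: 'Flaky',
        NO_SWARMING_TASK_FOUND: 'No swarming task found',
        NON_SWARMING_NO_RERUN: 'Non-swarming test, not re-run',
        NO_TRY_JOB_REASON_MAP[wf_analysis_status.PENDING]: 'Swarming task pending',
        NO_TRY_JOB_REASON_MAP[wf_analysis_status.ANALYZING]: 'Swarming task running',
        NO_TRY_JOB_REASON_MAP[wf_analysis_status.ERROR]: 'Swarming task error',
    }
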
17 30
18 31
19 def GenerateSwarmingTasksData(master_name, builder_name, build_number): 32 def _GetFailureResultMap(master_name, builder_name, build_number):
33 analysis = WfAnalysis.Get(master_name, builder_name, build_number)
34 if not analysis:
35 return None
36
37 return analysis.failure_result_map
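
The failure_result_map read here maps each failed step to the try-job key of its first failure; for swarming test steps there is one more layer keyed by test name (the format is spelled out in a comment this CL removes from GetAllTryJobResults further down). A hypothetical example, with made-up step and build values:

    failure_result_map = {
        # Non-swarming step: step_name -> try_job_key.
        'compile': 'm/b/120',
        # Swarming step: step_name -> {test_name: try_job_key}.
        'some_tests on platform': {
            'test1': 'm/b/121',
            'test2': 'm/b/120',
        },
    }
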
38
39
40
41 def _GetAllTestsForASwarmingTask(task_key, step_failure_result_map):
stgao 2016/03/25 22:50:24 nit: three empty lines above.
chanli 2016/03/25 23:22:03 Done.
42 all_tests = set()
43 for test_name, test_task_key in step_failure_result_map.iteritems():
44 if task_key == test_task_key:
45 all_tests.add(test_name)
46 return list(all_tests)
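
Since step_failure_result_map is a dict, each test name occurs only once, so the set above is purely defensive; an equivalent comprehension (a sketch, not part of this CL) would be:

    def _GetAllTestsForASwarmingTask(task_key, step_failure_result_map):
      # Tests whose first-failure key points at the given swarming task.
      return [test_name for test_name, test_task_key
              in step_failure_result_map.iteritems()
              if test_task_key == task_key]
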
47
48
49 def _GenerateSwarmingTasksData(failure_result_map):
OLD (lines 20-69):

  """Collects info for all related swarming tasks.

  Returns: A dict as below:
  {
      'step1': {
          'swarming_tasks': [
              {
                  'status': 'Completed',
                  'task_id': 'task1',
                  'task_url': (
                      'https://chromium-swarm.appspot.com/user/task/task1'),
                  'tests': ['test2']
              },
              {
                  'status': 'Completed',
                  'task_id': 'task0',
                  'task_url': (
                      'https://chromium-swarm.appspot.com/user/task/task0'),
                  'tests': ['test1']
              }
          ],
          'tests': {
              'test1': {
                  'status': 'Completed',
                  'task_id': 'task0',
                  'task_url': (
                      'https://chromium-swarm.appspot.com/user/task/task0')
              },
              'test2': {
                  'status': 'Completed',
                  'task_id': 'task1',
                  'task_url': (
                      'https://chromium-swarm.appspot.com/user/task/task1')
              }
          }
      },
      'step2': {
          'swarming_tasks': [
              {
                  'status': 'Pending'
              }
          ],
          'tests': {
              'test1': {
                  'status': 'Pending'
              }
          }
      }
  }
  """

NEW (lines 50-90):

  """Collects info for all related swarming tasks.

  Returns: A dict as below:
  {
      'step1': {
          'swarming_tasks': {
              'm/b/121': {
                  'task_info': {
                      'status': 'Completed',
                      'task_id': 'task1',
                      'task_url': ('https://chromium-swarm.appspot.com/user'
                                   '/task/task1')
                  },
                  'all_tests': ['test2', 'test3', 'test4'],
                  'reliable_tests': ['test2'],
                  'flaky_tests': ['test3', 'test4']
              }
          }
      },
      'step2': {
          'swarming_tasks': {
              'm/b/121': {
                  'task_info': {
                      'status': 'Pending'
                  },
                  'all_tests': ['test1']
              }
          }
      },
      'step3': {
          'swarming_tasks': {
              'm/b/121': {
                  'task_info': {
                      'status': 'No swarming rerun found'
                  },
                  'all_tests': ['test1']
              }
          }
      }
  }
  """
70 tasks_info = defaultdict(dict) 91 swarming_server = waterfall_config.GetSwarmingSettings()['server_host']
stgao 2016/03/25 22:50:23 This datastore read is not needed if we don't proc
chanli 2016/03/25 23:22:03 Done.
71 92
72 analysis = WfAnalysis.Get(master_name, builder_name, build_number) 93 tasks_info = defaultdict(lambda: defaultdict(lambda: defaultdict(dict)))
73 if not analysis:
74 return tasks_info
75 94
76 failure_result_map = analysis.failure_result_map
77 if failure_result_map: 95 if failure_result_map:
78 for step_name, failure in failure_result_map.iteritems(): 96 for step_name, failure in failure_result_map.iteritems():
97 step_tasks_info = tasks_info[step_name]['swarming_tasks']
79 if isinstance(failure, dict): 98 if isinstance(failure, dict):
80 # Only trigger swarming task for swarming test failures. 99 # Only swarming test failures have swarming re-runs.
81 key_test_map = defaultdict(list) 100 swarming_task_keys = set()
82 for test_name, first_failure_key in failure.iteritems(): 101 for first_failure_key in failure.values():
83 key_test_map[first_failure_key].append(test_name) 102 swarming_task_keys.add(first_failure_key)
stgao 2016/03/25 22:50:24 Maybe simplified with: swarming_task_keys = set(f
chanli 2016/03/25 23:22:03 Done.
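
The truncated suggestion above presumably completes to building the set directly from the map's values, replacing the explicit loop:

    swarming_task_keys = set(failure.values())
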
84 103
85 tasks_info[step_name]['swarming_tasks'] = [] 104 for key in swarming_task_keys:
86 tasks_info[step_name]['tests'] = defaultdict(dict) 105 task_dict = step_tasks_info[key]
87 step_tasks_info = tasks_info[step_name]['swarming_tasks']
88 tests = tasks_info[step_name]['tests']
89 for key, test_names in key_test_map.iteritems():
90 referred_build_keys = key.split('/') 106 referred_build_keys = key.split('/')
91 task = WfSwarmingTask.Get(*referred_build_keys, step_name=step_name) 107 task = WfSwarmingTask.Get(*referred_build_keys, step_name=step_name)
92 if not task: 108 if not task: # In case the task was manually removed from the datastore.
93 continue 109 task_info = {
94 task_info = { 110 'status': NO_SWARMING_TASK_FOUND
95 'status': wf_analysis_status.SWARMING_STATUS_TO_DESCRIPTION.get( 111 }
96 task.status) 112 task_dict['all_tests'] = _GetAllTestsForASwarmingTask(key, failure)
97 } 113 else:
98 if task.task_id: 114 task_info = {
99 task_info['task_id'] = task.task_id 115 'status': task.status
100 task_info['task_url'] = 'https://%s/user/task/%s' % ( 116 }
101 waterfall_config.GetSwarmingSettings()['server_host'],
102 task.task_id)
103 117
104 for test_name in test_names: 118 task_dict['all_tests'] = (
105 tests[test_name] = copy.deepcopy(task_info) 119 _GetAllTestsForASwarmingTask(key, failure)
120 if not (task.parameters and task.parameters.get('tests'))
121 else task.parameters['tests'])
106 122
107 task_info['tests'] = test_names 123 # Get the step name without platform.
108 step_tasks_info.append(task_info) 124 # This value should have been saved in task.parameters;
125 # if no such value was saved, fall back to splitting the step_name.
126 task_dict['ref_name'] = (
127 step_name.split()[0]
128 if not task.parameters or not task.parameters.get('ref_name')
129 else task.parameters['ref_name'])
130
131 if task.task_id: # Swarming rerun has started.
132 task_info['task_id'] = task.task_id
133 task_info['task_url'] = 'https://%s/user/task/%s' % (
134 swarming_server, task.task_id)
135 if task.classified_tests: # Swarming rerun has result.
stgao 2016/03/25 22:50:23 This is when the Swarming task completed?
chanli 2016/03/25 23:22:03 Yes. this is derived from the result of swarming t
136 task_dict['reliable_tests'] = task.classified_tests.get(
137 'reliable_tests', [])
138 task_dict['flaky_tests'] = task.classified_tests.get(
139 'flaky_tests', [])
140
141 task_dict['task_info'] = task_info
142 else:
143 step_tasks_info[failure] = {
144 'task_info': {
145 'status': NON_SWARMING_NO_RERUN
146 }
147 }
109 148
110 return tasks_info 149 return tasks_info
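
A sketch of how a caller might walk the returned structure; the key names follow the new docstring above, while the data itself is hypothetical:

    tasks_info = _GenerateSwarmingTasksData(failure_result_map)
    for step_name, step_data in tasks_info.iteritems():
      for build_key, task in step_data['swarming_tasks'].iteritems():
        status = task.get('task_info', {}).get('status')
        reliable_tests = task.get('reliable_tests', [])
        flaky_tests = task.get('flaky_tests', [])
        # e.g. render status, task_url and the reliable/flaky split in the UI.
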
111 150
112 151
152 def GetSwarmingTaskInfo(master_name, builder_name, build_number):
153 failure_result_map = _GetFailureResultMap(
154 master_name, builder_name, build_number)
stgao 2016/03/25 22:50:23 nit: indent
chanli 2016/03/25 23:22:03 Done.
155 return _GenerateSwarmingTasksData(failure_result_map)
156
157
113 def _GetTryJobBuildNumber(url): 158 def _GetTryJobBuildNumber(url):
114 build_keys = buildbot.ParseBuildUrl(url) 159 build_keys = buildbot.ParseBuildUrl(url)
115 return build_keys[2] 160 return build_keys[2]
116 161
117 162
118 def _GetCulpritInfoForTryJobResult(try_job_key, culprits_info): 163 def _OrganizeTryJobResultByCulprits(try_job_culprits):
164 """Re-organize try job culprits by revision.
165
166 Args:
167 try_job_culprits (dict): A dict of culprits for one step organized by test:
168 {
169 'tests': {
170 'a_test1': {
171 'revision': 'rev1',
172 'commit_position': '1',
173 'review_url': 'url_1'
174 },
175 'a_test2': {
176 'revision': 'rev1',
177 'commit_position': '1',
178 'review_url': 'url_1'
179 }
180 }
181 }
182 Returns:
183 A dict of culprits for one step organized by revision:
184 {
185 'rev1': {
186 'revision': 'rev1',
187 'commit_position': '1',
188 'review_url': 'url_1',
189 'tests': ['a_test1', 'a_test2']
190 }
191 }
192 """
193 if not try_job_culprits or not try_job_culprits.get('tests'):
194 return {}
195
196 organized_culprits = {}
197 for test_name, culprit in try_job_culprits['tests'].iteritems():
198 revision = culprit['revision']
199 if organized_culprits.get(revision):
200 organized_culprits[revision]['failed_tests'].append(test_name)
201 else:
202 organized_culprits[revision] = culprit
203 organized_culprits[revision]['failed_tests'] = [test_name]
204
205 return organized_culprits
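
Note that the else branch above stores the incoming culprit dict itself and then adds a 'failed_tests' key to it, so the input try_job_culprits is mutated as a side effect. A variant with the same output that copies first (a sketch, not part of this CL):

    organized_culprits = {}
    for test_name, culprit in try_job_culprits['tests'].iteritems():
      entry = organized_culprits.setdefault(culprit['revision'], dict(culprit))
      entry.setdefault('failed_tests', []).append(test_name)
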
206
207
208 def _GetCulpritInfoForTryJobResultForTest(try_job_key, culprits_info):
119 referred_build_keys = try_job_key.split('/') 209 referred_build_keys = try_job_key.split('/')
120 try_job = WfTryJob.Get(*referred_build_keys) 210 try_job = WfTryJob.Get(*referred_build_keys)
121 if not try_job: 211 if not try_job or try_job.compile_results:
122 return 212 return
123 213
124 if try_job.compile_results: 214 try_job_result = try_job.test_results[-1] if try_job.test_results else None
125 try_job_result = try_job.compile_results[-1] 215
126 elif try_job.test_results: 216 for step_try_jobs in culprits_info.values():
127 try_job_result = try_job.test_results[-1] 217 # If the try job found different culprits for different tests, split the tests by culprit.
128 else: 218 additional_tests_culprit_info = []
129 try_job_result = None 219 for try_job_info in step_try_jobs['try_jobs']:
130 220 if try_job_key != try_job_info['try_job_key']:
131 additional_tests_culprit_info = {} 221 continue
132 for culprit_info in culprits_info.values(): 222
133 if culprit_info['try_job_key'] != try_job_key: 223 if try_job_info.get('status'):
stgao 2016/03/25 22:50:24 "if not" instead?
chanli 2016/03/25 23:22:03 I use 'if' here to reduce one level of indentation
134 continue 224 # The status was already set based on the swarming task, so there is no try job result to attach for this entry.
stgao 2016/03/25 22:50:23 this comment seems not clear enough.
chanli 2016/03/25 23:22:03 Done.
135 225 continue
136 # Only include try job result for reliable tests. 226
137 # Flaky tests have been marked as 'Flaky'. 227 try_job_info['status'] = try_job.status
138 culprit_info['status'] = ( 228 # Needs to use ref_name to match step_name in try job.
139 wf_analysis_status.TRY_JOB_STATUS_TO_DESCRIPTION[try_job.status] 229 ref_name = try_job_info['ref_name']
stgao 2016/03/25 22:50:23 nit: not used outside of if below.
chanli 2016/03/25 23:22:03 Done.
140 if not culprit_info.get('status') else culprit_info['status']) 230 if try_job_result:
141 231 # Saves try job information.
142 if try_job_result and culprit_info['status'] != FLAKY: 232 try_job_info['try_job_url'] = try_job_result['url']
143 if try_job_result.get('url'): 233 try_job_info['try_job_build_number'] = (
144 culprit_info['try_job_url'] = try_job_result['url']
145 culprit_info['try_job_build_number'] = (
146 _GetTryJobBuildNumber(try_job_result['url'])) 234 _GetTryJobBuildNumber(try_job_result['url']))
147 if try_job_result.get('culprit'): 235
148 try_job_culprits = try_job_result['culprit'] 236 if (try_job_result.get('culprit') and
149 step = culprit_info.get('step_no_platform', culprit_info['step_name']) 237 try_job_result['culprit'].get(ref_name)):
150 test = culprit_info['test_name'] 238 # Saves try job culprits information.
151 239
152 if test == 'N/A': # Only step level. 240 # Uses culprits to group tests.
153 if try_job_culprits.get(step, {}).get('tests'): 241 culprit_tests_map = _OrganizeTryJobResultByCulprits(
154 # try job results has specified tests. 242 try_job_result['culprit'][ref_name])
155 step_culprits = try_job_culprits[step]['tests'] 243 all_tests = try_job_info['tests']
156 for test_name, try_job_culprit in step_culprits.iteritems(): 244 list_of_culprits = []
157 additional_test_key = '%s-%s' % (step, test_name) 245 for culprit_info in culprit_tests_map.values():
158 additional_tests_culprit_info[additional_test_key] = { 246 failed_tests = culprit_info['failed_tests']
159 'step_name': step, 247 list_of_culprits.append(culprit_info)
160 'test_name': test_name, 248 # Gets the leftover tests that did not fail because of this culprit.
161 'try_job_key': try_job_key, 249 all_tests = list(set(all_tests) ^ set(failed_tests))
stgao 2016/03/25 22:50:24 Name of ``all_tests`` seems a little misleading.
chanli 2016/03/25 23:22:03 Done.
162 'status': culprit_info['status'], 250 if not all_tests:
163 'try_job_url': culprit_info['try_job_url'], 251 break
stgao 2016/03/25 22:50:24 Why we stop here? Mind a comment?
chanli 2016/03/25 23:22:03 Done.
164 'try_job_build_number': culprit_info['try_job_build_number'], 252
165 'revision': try_job_culprit.get('revision'), 253 index_start = 1
166 'commit_position': try_job_culprit.get('commit_position'), 254 if all_tests:
167 'review_url': try_job_culprit.get('review_url') 255 # There are tests that do not have try job culprits.
168 } 256 # Group these tests together.
169 continue 257 # Save them in current try_job_info.
258 try_job_info['tests'] = all_tests
259 try_job_info['culprit'] = {}
260 # Saves all the tests that have culprits later.
261 index_start = 0
170 else: 262 else:
171 # For historical culprit found by try job for compile, 263 # Saves the first culprit in current try_job_info.
172 # step name is not recorded. 264 # Saves all the other culprits later.
173 culprit = try_job_culprits.get(step) or try_job_culprits 265 try_job_info['culprit'] = {
174 elif test in try_job_culprits.get(step, {}).get('tests'): 266 'revision': list_of_culprits[0]['revision'],
175 culprit = try_job_culprits[step]['tests'][test] 267 'commit_position': list_of_culprits[0]['commit_position'],
176 else: # pragma: no cover 268 'review_url': list_of_culprits[0]['review_url']
177 continue # No culprit for test found. 269 }
178 270 try_job_info['tests'] = list_of_culprits[0]['failed_tests']
179 culprit_info['revision'] = culprit.get('revision') 271
180 culprit_info['commit_position'] = culprit.get('commit_position') 272 for n in xrange(index_start, len(list_of_culprits)):
181 culprit_info['review_url'] = culprit.get('review_url') 273 iterate_culprit = list_of_culprits[n]
182 274 tmp_try_job_info = copy.deepcopy(try_job_info)
183 if additional_tests_culprit_info: 275 tmp_try_job_info['culprit'] = {
184 for key, test_culprit_info in additional_tests_culprit_info.iteritems(): 276 'revision': iterate_culprit['revision'],
185 culprits_info.pop(test_culprit_info['step_name'], None) 277 'commit_position': iterate_culprit['commit_position'],
186 culprits_info[key] = test_culprit_info 278 'review_url': iterate_culprit['review_url']
187 279 }
188 280 tmp_try_job_info['tests'] = iterate_culprit['failed_tests']
189 def _UpdateTryJobCulpritUsingSwarmingTask( 281 additional_tests_culprit_info.append(tmp_try_job_info)
190 step_name, failure_key_set, culprits_info): 282
191 for failure_key in failure_key_set: 283 if additional_tests_culprit_info:
192 build_keys = failure_key.split('/') 284 step_try_jobs['try_jobs'].extend(additional_tests_culprit_info)
193 task = WfSwarmingTask.Get(*build_keys, step_name=step_name) 285
194 if not task: 286
195 continue 287 def _UpdateTryJobInfoBasedOnSwarming(step_tasks_info, try_jobs):
196 classified_tests = task.classified_tests 288 """
197 step_no_platform = task.parameters.get( 289 Args:
198 'ref_name', step_name.split()[0]) 290 step_tasks_info (dict): A dict of swarming task info for this step.
199 for culprit_info in culprits_info.values(): 291 It is the result from _GenerateSwarmingTasksData.
200 if (culprit_info['try_job_key'] == failure_key and 292 try_jobs (list): A list to save try job data for the step, format as below:
201 step_name == culprit_info['step_name']): 293 [
202 culprit_info['step_no_platform'] = step_no_platform 294 {
203 if culprit_info['test_name'] in classified_tests.get('flaky_tests', []): 295 'try_job_key': 'm/b/120'
204 culprit_info['status'] = FLAKY 296 },
297 {
298 'try_job_key': 'm/b/121'
299 },
300 ...
301 ]
302 """
303 additional_flakiness_list = []
304 for try_job in try_jobs:
305 try_job_key = try_job['try_job_key']
306 for swarming_task_key, task in step_tasks_info.get(
307 'swarming_tasks', {}).iteritems():
308 if swarming_task_key != try_job_key:
stgao 2016/03/25 22:50:23 Why we need this check? A comment to explain is ap
chanli 2016/03/25 23:22:03 Yes... There will only be one task for each step i
309 continue
310
311 if task['task_info']['status'] != wf_analysis_status.ANALYZED:
312 # Something is wrong with the swarming task, or it has not finished yet,
313 # so there is no try job yet (and there may never be one).
314 try_job['status'] = NO_TRY_JOB_REASON_MAP[task['task_info']['status']]
315 try_job['tests'] = task['all_tests']
316 else:
317 try_job['ref_name'] = task['ref_name']
318 if task.get('reliable_tests'):
319 try_job['tests'] = task['reliable_tests']
320 elif task.get('flaky_tests'): # pragma: no cover
stgao 2016/03/25 22:50:23 It might be possible that a step has both flaky an
chanli 2016/03/25 23:22:03 Yes. That case is handled by ln329 - ln336.
321 # All Flaky.
322 try_job['status'] = FLAKY
323 try_job['tests'] = task['flaky_tests']
324
325 if task['task_info'].get('task_id'):
326 try_job['task_id'] = task['task_info']['task_id']
327 try_job['task_url'] = task['task_info']['task_url']
328
329 if task.get('flaky_tests') and task.get('reliable_tests'):
330 # Split this try job into two groups: flaky group and reliable group.
331 flaky_try_job = copy.deepcopy(try_job)
332 flaky_try_job['status'] = FLAKY
333 flaky_try_job['tests'] = task['flaky_tests']
334 flaky_try_job['task_id'] = task['task_info']['task_id']
335 flaky_try_job['task_url'] = task['task_info']['task_url']
336 additional_flakiness_list.append(flaky_try_job)
337
338 if additional_flakiness_list:
339 try_jobs.extend(additional_flakiness_list)
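
A sketch of the in-place update described by the docstring above; the inputs are hypothetical, and real statuses are the numeric wf_analysis_status codes rather than the human-readable strings shown in the _GenerateSwarmingTasksData docstring:

    step_tasks_info = tasks_info['step1']  # from _GenerateSwarmingTasksData
    try_jobs = [{'try_job_key': 'm/b/121'}]
    _UpdateTryJobInfoBasedOnSwarming(step_tasks_info, try_jobs)
    # If the matching swarming task is ANALYZED, try_jobs[0] gains 'ref_name'
    # and the reliable tests, and the task_id/task_url are copied over when
    # present; if the task also found flaky tests, a second entry with status
    # FLAKY is appended. Otherwise try_jobs[0] gets a status from
    # NO_TRY_JOB_REASON_MAP plus the task's all_tests.
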
340
341
342 def _GetAllTryJobResultsForTest(failure_result_map, tasks_info):
343 culprits_info = defaultdict(lambda: defaultdict(list))
344 if not tasks_info:
345 return culprits_info
346
347 try_job_keys = set()
348 for step_name, step_failure_result_map in failure_result_map.iteritems():
349 try_jobs = culprits_info[step_name]['try_jobs']
350
351 if isinstance(step_failure_result_map, dict):
352 for try_job_key in step_failure_result_map.values():
353 if try_job_key not in try_job_keys:
354 try_job_dict = {
355 'try_job_key': try_job_key
356 }
357 try_jobs.append(try_job_dict)
358 try_job_keys.add(try_job_key)
359 else:
360 # Try job should only be triggered for swarming tests, because we cannot
361 # identify flaky tests for non-swarming tests.
362 try_job_dict = {
363 'try_job_key': step_failure_result_map
364 }
365 try_jobs.append(try_job_dict)
366
367 _UpdateTryJobInfoBasedOnSwarming(tasks_info[step_name], try_jobs)
368
369 for try_job_key in try_job_keys:
370 _GetCulpritInfoForTryJobResultForTest(try_job_key, culprits_info)
371
372 return culprits_info
373
374
375 def _GetTryJobResultForCompile(failure_result_map):
376 try_job_key = failure_result_map['compile']
377 referred_build_keys = try_job_key.split('/')
378 culprit_info = defaultdict(lambda: defaultdict(list))
379
380 try_job = WfTryJob.Get(*referred_build_keys)
381 if not try_job or try_job.test_results:
382 return culprit_info
383
384 try_job_result = (
385 try_job.compile_results[-1] if try_job.compile_results else None)
386
387 compile_try_job = {
388 'try_job_key': try_job_key,
389 'status': try_job.status
390 }
391
392 if try_job_result:
393 if try_job_result.get('url'):
394 compile_try_job['try_job_url'] = try_job_result['url']
395 compile_try_job['try_job_build_number'] = (
396 _GetTryJobBuildNumber(try_job_result['url']))
397 if try_job_result.get('culprit', {}).get('compile'):
398 compile_try_job['culprit'] = try_job_result['culprit']['compile']
399
400 culprit_info['compile']['try_jobs'].append(compile_try_job)
401 return culprit_info
205 402
206 403
207 def GetAllTryJobResults(master_name, builder_name, build_number): 404 def GetAllTryJobResults(master_name, builder_name, build_number):
208 culprits_info = {} 405 culprits_info = {}
209 try_job_keys = set() 406 is_test_failure = True
210 407
211 analysis = WfAnalysis.Get(master_name, builder_name, build_number) 408 failure_result_map = _GetFailureResultMap(
212 if not analysis: 409 master_name, builder_name, build_number)
stgao 2016/03/25 22:50:23 nit: indent.
chanli 2016/03/25 23:22:03 Done.
213 return culprits_info 410
214
215 failure_result_map = analysis.failure_result_map
216 if failure_result_map: 411 if failure_result_map:
217 # failure_result_map uses step_names as keys and saves referred try_job_keys 412 for step_name in failure_result_map:
218 # If non-swarming, step_name and referred_try_job_key match directly as: 413 if step_name == 'compile':
219 # step_name: try_job_key 414 is_test_failure = False
220 # If swarming, add one more layer of tests, so the format would be: 415 break
221 # step_name: { 416 if is_test_failure:
222 # test_name1: try_job_key1, 417 tasks_info = _GenerateSwarmingTasksData(failure_result_map)
223 # test_name2: try_job_key2, 418 culprits_info = _GetAllTryJobResultsForTest(
224 # ... 419 failure_result_map, tasks_info)
225 # } 420 else:
226 for step_name, step_failure_result_map in failure_result_map.iteritems(): 421 culprits_info = _GetTryJobResultForCompile(failure_result_map)
227 if isinstance(step_failure_result_map, dict):
228 step_refering_keys = set()
229 for failed_test, try_job_key in step_failure_result_map.iteritems():
230 step_test_key = '%s-%s' % (step_name, failed_test)
231 culprits_info[step_test_key] = {
232 'step_name': step_name,
233 'test_name': failed_test,
234 'try_job_key': try_job_key
235 }
236 step_refering_keys.add(try_job_key)
237
238 _UpdateTryJobCulpritUsingSwarmingTask(
239 step_name, step_refering_keys, culprits_info)
240 try_job_keys.update(step_refering_keys)
241 else:
242 culprits_info[step_name] = {
243 'step_name': step_name,
244 'test_name': 'N/A',
245 'try_job_key': step_failure_result_map
246 }
247 try_job_keys.add(step_failure_result_map)
248
249 for try_job_key in try_job_keys:
250 _GetCulpritInfoForTryJobResult(try_job_key, culprits_info)
251 422
252 return culprits_info 423 return culprits_info
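
End to end, the handlers are expected to call the two public entry points; the master/builder/build values below are hypothetical:

    tasks_info = GetSwarmingTaskInfo('m', 'b', 121)
    culprits_info = GetAllTryJobResults('m', 'b', 121)
    # GetAllTryJobResults dispatches on failure_result_map: a 'compile' step is
    # handled by _GetTryJobResultForCompile, anything else goes through
    # _GenerateSwarmingTasksData and _GetAllTryJobResultsForTest.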