Chromium Code Reviews
chromiumcodereview-hr@appspot.gserviceaccount.com (chromiumcodereview-hr) | Please choose your nickname with Settings | Help | Chromium Project | Gerrit Changes | Sign out
(655)

Side by Side Diff: appengine/findit/waterfall/try_job_util.py

Issue 2179283009: [Findit] Compare lists to lists instead of lists to tuples, and change tests (Closed) Base URL: https://chromium.googlesource.com/infra/infra.git@master
Patch Set: Nit function rename, update comment wording Created 4 years, 4 months ago
Use n/p to move between diff chunks; N/P to move between comments. Draft comments are only viewable by you.
Jump to:
View unified diff | Download patch
OLDNEW
1 # Copyright 2015 The Chromium Authors. All rights reserved. 1 # Copyright 2015 The Chromium Authors. All rights reserved.
2 # Use of this source code is governed by a BSD-style license that can be 2 # Use of this source code is governed by a BSD-style license that can be
3 # found in the LICENSE file. 3 # found in the LICENSE file.
4 4
5 from datetime import datetime 5 from datetime import datetime
6 import logging 6 import logging
7 7
8 from google.appengine.ext import ndb 8 from google.appengine.ext import ndb
9 9
10 from common import appengine_util 10 from common import appengine_util
(...skipping 126 matching lines...) Expand 10 before | Expand all | Expand 10 after
137 return failed_steps_and_tests 137 return failed_steps_and_tests
138 138
139 for failed_step_name, failed_step in failed_steps.iteritems(): 139 for failed_step_name, failed_step in failed_steps.iteritems():
140 failed_steps_and_tests[failed_step_name] = [] 140 failed_steps_and_tests[failed_step_name] = []
141 for test_name in failed_step.get('tests', {}): 141 for test_name in failed_step.get('tests', {}):
142 failed_steps_and_tests[failed_step_name].append(test_name) 142 failed_steps_and_tests[failed_step_name].append(test_name)
143 143
144 return failed_steps_and_tests 144 return failed_steps_and_tests
145 145
146 146
def GetSuspectedCLsWithFailures(heuristic_result):
  """Builds a list of suspected CLs paired with the failures they may cause.

  Args:
    heuristic_result: heuristic analysis result dict with a 'failures' list;
        each failure has 'step_name', optionally 'tests' (each with
        'test_name' and 'suspected_cls'), and otherwise 'suspected_cls'.

  Returns:
    A list of 3-element lists, one per (suspected CL, failure) pair:

        [step_name, revision, test_name]

    for test-level failures, or

        [step_name, revision, None]

    for step-level failures.
  """
  if not heuristic_result:
    return []

  cls_with_failures = []
  for failure in heuristic_result['failures']:
    step_name = failure['step_name']
    tests = failure.get('tests')
    if tests:
      # Test-level suspects: one entry per suspected CL per failed test.
      for test in tests:
        for suspected_cl in test.get('suspected_cls', []):
          cls_with_failures.append(
              [step_name, suspected_cl['revision'], test['test_name']])
    else:
      # Step-level suspects: no individual test is implicated.
      for suspected_cl in failure['suspected_cls']:
        cls_with_failures.append([step_name, suspected_cl['revision'], None])

  return cls_with_failures
187 186
188 187
def _LinkAnalysisToBuildFailureGroup(
    master_name, builder_name, build_number, failure_group_key):
  """Points the build's WfAnalysis entity at its failure group.

  Args:
    master_name, builder_name, build_number: identify the analyzed build.
    failure_group_key: [master_name, builder_name, build_number] of the
        build that represents the group this analysis belongs to.
  """
  wf_analysis = WfAnalysis.Get(master_name, builder_name, build_number)
  wf_analysis.failure_group_key = failure_group_key
  wf_analysis.put()
194 193
195 194
def _CreateBuildFailureGroup(
    master_name, builder_name, build_number, build_failure_type, blame_list,
    suspected_tuples, output_nodes=None, failed_steps_and_tests=None):
  """Creates and persists a new WfFailureGroup keyed by this build.

  Args:
    master_name, builder_name, build_number: identify the representative build.
    build_failure_type: failure_type.COMPILE or failure_type.TEST.
    blame_list: revisions in the build's regression range.
    suspected_tuples: sorted suspected-CL entries from heuristic analysis.
    output_nodes: failed compile output nodes (compile failures only).
    failed_steps_and_tests: mapping of failed step to test names (test
        failures only).
  """
  group = WfFailureGroup.Create(master_name, builder_name, build_number)
  group.build_failure_type = build_failure_type
  group.blame_list = blame_list
  group.suspected_tuples = suspected_tuples
  group.output_nodes = output_nodes
  group.failed_steps_and_tests = failed_steps_and_tests
  group.put()
206 205
207 206
def _GetMatchingGroup(wf_failure_groups, blame_list, suspected_tuples):
  """Finds an existing group this failure belongs to, if any.

  A candidate matches when its blame list intersects the given one AND its
  suspected tuples are exactly equal.

  Returns:
    The first matching WfFailureGroup, or None if there is no match.
  """
  for candidate in wf_failure_groups:
    if (_BlameListsIntersection(candidate.blame_list, blame_list) and
        candidate.suspected_tuples == suspected_tuples):
      return candidate
  return None
215 214
215
216 def _GetOutputNodes(signals): 216 def _GetOutputNodes(signals):
217 if not signals or 'compile' not in signals: 217 if not signals or 'compile' not in signals:
218 return [] 218 return []
219 219
220 # Compile failures with no output nodes will be considered unique. 220 # Compile failures with no output nodes will be considered unique.
221 return signals['compile'].get('failed_output_nodes', []) 221 return signals['compile'].get('failed_output_nodes', [])
222 222
223 223
def _GetMatchingCompileFailureGroups(output_nodes):
  """Fetches compile-failure groups with identical failed output nodes.

  The stored and queried output nodes are expected to already be unique and
  sorted, so list equality is a valid match criterion.
  """
  query = WfFailureGroup.query(ndb.AND(
      WfFailureGroup.build_failure_type == failure_type.COMPILE,
      WfFailureGroup.output_nodes == output_nodes))
  return query.fetch()
230 230
231 231
def _GetMatchingTestFailureGroups(failed_steps_and_tests):
  """Fetches test-failure groups with identical failed steps and tests.

  NOTE(review): equality on the stored mapping assumes a deterministic
  ordering of steps/tests on both sides — confirm upstream keeps it sorted.
  """
  query = WfFailureGroup.query(ndb.AND(
      WfFailureGroup.build_failure_type == failure_type.TEST,
      WfFailureGroup.failed_steps_and_tests == failed_steps_and_tests))
  return query.fetch()
237 237
238 238
def _IsBuildFailureUniqueAcrossPlatforms(
    master_name, builder_name, build_number, build_failure_type, blame_list,
    failed_steps, signals, heuristic_result):
  """Determines whether this build failure is the first of its kind.

  A failure is unique if no existing WfFailureGroup has the same failure
  signature (compile output nodes, or failed steps/tests), an intersecting
  blame list, and the same suspected CLs. A unique failure gets a new group
  and should trigger a new try job; otherwise the analysis is linked to the
  matching existing group.

  Args:
    master_name, builder_name, build_number: identify the failed build.
    build_failure_type: failure_type.COMPILE or failure_type.TEST; any other
        type is not groupable and is reported as unique.
    blame_list: revisions in the build's regression range.
    failed_steps: failed steps info (used for test failures).
    signals: failure signals (used for compile failures).
    heuristic_result: heuristic analysis result used for suspected CLs.

  Returns:
    True if the failure is unique (a new group was created or grouping is
    not applicable), False if it matched an existing group.
  """
  output_nodes = None
  failed_steps_and_tests = None

  if build_failure_type == failure_type.COMPILE:
    output_nodes = _GetOutputNodes(signals)
    if not output_nodes:
      # Compile failures with no output nodes are considered unique.
      return True
    groups = _GetMatchingCompileFailureGroups(output_nodes)
  elif build_failure_type == failure_type.TEST:
    failed_steps_and_tests = _GetStepsAndTests(failed_steps)
    if not failed_steps_and_tests:
      return True
    groups = _GetMatchingTestFailureGroups(failed_steps_and_tests)
  else:
    # BUG FIX: the original adjacent string literals joined without a space,
    # logging '... Only Compile and Testfailures can be grouped.'. Also use
    # lazy %-args instead of eager interpolation.
    logging.info(
        'Grouping %s failures is not supported. Only Compile and Test '
        'failures can be grouped.',
        failure_type.GetDescriptionForFailureType(build_failure_type))
    return True

  suspected_tuples = sorted(GetSuspectedCLsWithFailures(heuristic_result))
  existing_group = _GetMatchingGroup(groups, blame_list, suspected_tuples)

  if existing_group:
    # Not unique: link this analysis to the group's representative build.
    logging.info('A group already exists, no need for a new try job.')
    _LinkAnalysisToBuildFailureGroup(
        master_name, builder_name, build_number,
        [existing_group.master_name, existing_group.builder_name,
         existing_group.build_number])
  else:
    # Unique: create a new WfFailureGroup keyed by this build.
    logging.info('A new try job should be run for this unique build failure.')
    _CreateBuildFailureGroup(
        master_name, builder_name, build_number, build_failure_type, blame_list,
        suspected_tuples, output_nodes, failed_steps_and_tests)
    _LinkAnalysisToBuildFailureGroup(master_name, builder_name, build_number,
                                     [master_name, builder_name, build_number])

  return not existing_group
279 280
(...skipping 150 matching lines...) Expand 10 before | Expand all | Expand 10 after
430 pipeline.pipeline_status_path, try_job_type) 431 pipeline.pipeline_status_path, try_job_type)
431 else: # pragma: no cover 432 else: # pragma: no cover
432 logging_str = ( 433 logging_str = (
433 'Try job was scheduled for build %s, %s, %s: %s because of %s ' 434 'Try job was scheduled for build %s, %s, %s: %s because of %s '
434 'failure.') % ( 435 'failure.') % (
435 master_name, builder_name, build_number, 436 master_name, builder_name, build_number,
436 pipeline.pipeline_status_path, try_job_type) 437 pipeline.pipeline_status_path, try_job_type)
437 logging.info(logging_str) 438 logging.info(logging_str)
438 439
439 return failure_result_map 440 return failure_result_map
OLDNEW

Powered by Google App Engine
This is Rietveld 408576698