OLD | NEW |
1 # Copyright (c) 2014 The Chromium Authors. All rights reserved. | 1 # Copyright (c) 2014 The Chromium Authors. All rights reserved. |
2 # Use of this source code is governed by a BSD-style license that can be | 2 # Use of this source code is governed by a BSD-style license that can be |
3 # found in the LICENSE file. | 3 # found in the LICENSE file. |
4 | 4 |
5 import os | 5 import os |
6 from threading import Lock, Thread | 6 from threading import Lock |
7 | 7 |
8 import blame | 8 import blame |
9 from common import utils | 9 from common import utils |
10 import component_dictionary | 10 import component_dictionary |
11 import crash_utils | 11 import crash_utils |
12 import git_repository_parser | 12 import git_repository_parser |
13 import match_set | 13 import match_set |
14 import svn_repository_parser | 14 import svn_repository_parser |
15 | 15 |
16 | 16 |
(...skipping 115 matching lines...) |
132 stacktrace. | 132 stacktrace. |
133 component_path: The path of the component to search for. | 133 component_path: The path of the component to search for. |
134 component_name: The name of the component to search for. | 134 component_name: The name of the component to search for. |
135 repository_parser: The parser object to parse the line diff. | 135 repository_parser: The parser object to parse the line diff. |
136 codereview_api_url: A code review url to retrieve data from. | 136 codereview_api_url: A code review url to retrieve data from. |
137 | 137 |
138 Returns: | 138 Returns: |
139 Matches, a set of match objects. | 139 Matches, a set of match objects. |
140 """ | 140 """ |
141 matches = match_set.MatchSet(codereview_api_url) | 141 matches = match_set.MatchSet(codereview_api_url) |
142 threads = [] | |
143 | 142 |
| 143 tasks = [] |
144 # Iterate through the crashed files in the stacktrace. | 144 # Iterate through the crashed files in the stacktrace. |
145 for crashed_file_path in file_to_crash_info: | 145 for crashed_file_path in file_to_crash_info: |
146 # Ignore header file. | 146 # Ignore header file. |
147 if crashed_file_path.endswith('.h'): | 147 if crashed_file_path.endswith('.h'): |
148 continue | 148 continue |
149 | 149 |
150 # If the file in the stacktrace is not changed in any commits, continue. | 150 # If the file in the stacktrace is not changed in any commits, continue. |
151 for changed_file_path in file_to_revision_info: | 151 for changed_file_path in file_to_revision_info: |
152 changed_file_name = changed_file_path.split('/')[-1].lower() | 152 changed_file_name = changed_file_path.split('/')[-1].lower() |
153 crashed_file_name = crashed_file_path.split('/')[-1].lower() | 153 crashed_file_name = crashed_file_path.split('/')[-1].lower() |
(...skipping 11 matching lines...) |
165 functions = file_to_crash_info.GetCrashFunctions(crashed_file_path) | 165 functions = file_to_crash_info.GetCrashFunctions(crashed_file_path) |
166 | 166 |
167 # Iterate through the CLs in which this file path is changed. | 167 # Iterate through the CLs in which this file path is changed. |
168 for (cl, file_change_type) in file_to_revision_info[changed_file_path]: | 168 for (cl, file_change_type) in file_to_revision_info[changed_file_path]: |
169 # If the file change is a delete, ignore this CL. | 169 # If the file change is a delete, ignore this CL. |
170 if file_change_type == 'D': | 170 if file_change_type == 'D': |
171 continue | 171 continue |
172 | 172 |
173 revision = revisions_info_map[cl] | 173 revision = revisions_info_map[cl] |
174 | 174 |
175 match_thread = Thread( | 175 tasks.append({ |
176 target=GenerateMatchEntry, | 176 'function': GenerateMatchEntry, |
177 args=[matches, revision, cl, changed_file_path, functions, | 177 'args': [matches, revision, cl, changed_file_path, functions, |
178 component_path, component_name, crashed_line_numbers, | 178 component_path, component_name, crashed_line_numbers, |
179 stack_frame_nums, file_change_type, | 179 stack_frame_nums, file_change_type, |
180 repository_parser]) | 180 repository_parser] |
181 threads.append(match_thread) | 181 }) |
182 match_thread.start() | |
183 | 182 |
184 for match_thread in threads: | 183 # Run all the tasks. |
185 match_thread.join() | 184 crash_utils.RunTasks(tasks) |
186 | 185 |
187 matches.RemoveRevertedCLs() | 186 matches.RemoveRevertedCLs() |
188 | 187 |
189 return matches | 188 return matches |
190 | 189 |
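Note: crash_utils.RunTasks is not part of this diff, so the sketch below is an assumption about its shape, not the actual implementation. It assumes RunTasks accepts the {'function': ..., 'args': [...]} dicts built above and simply fans them out to worker threads before joining, which would preserve the old concurrency behavior while centralizing thread management:

from threading import Thread

def RunTasks(tasks):
  """Runs each task's 'function' with its 'args' on a worker thread, then joins."""
  threads = []
  for task in tasks:
    thread = Thread(target=task['function'], args=task['args'])
    threads.append(thread)
    thread.start()

  # Wait for all workers to finish before returning to the caller.
  for thread in threads:
    thread.join()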
191 | 190 |
192 def FindMatchForComponent(component_path, file_to_crash_info, changelog, | 191 def FindMatchForComponent(component_path, file_to_crash_info, changelog, |
193 callstack_priority, results, results_lock): | 192 callstack_priority, results, results_lock): |
194 """Parses changelog and finds suspected CLs for a given component. | 193 """Parses changelog and finds suspected CLs for a given component. |
195 | 194 |
(...skipping 38 matching lines...) |
234 components: A set of components to look for. | 233 components: A set of components to look for. |
235 component_to_changelog_map: A map from component to its parsed changelog. | 234 component_to_changelog_map: A map from component to its parsed changelog. |
236 results: A list to aggregate results from all stacktraces. | 235 results: A list to aggregate results from all stacktraces. |
237 results_lock: A lock that guards results. | 236 results_lock: A lock that guards results. |
238 """ | 237 """ |
239 # Create component dictionary from the component and call stack. | 238 # Create component dictionary from the component and call stack. |
240 component_dict = component_dictionary.ComponentDictionary(callstack, | 239 component_dict = component_dictionary.ComponentDictionary(callstack, |
241 components) | 240 components) |
242 callstack_priority = callstack.priority | 241 callstack_priority = callstack.priority |
243 | 242 |
| 243 tasks = [] |
244 # Iterate through all components and create a new thread for each component. | 244 # Iterate through all components and create a task for each component. |
245 threads = [] | |
246 for component_path in component_dict: | 245 for component_path in component_dict: |
247 # If the component to consider in this callstack is not in the parsed list | 246 # If the component to consider in this callstack is not in the parsed list |
248 # of components, ignore this one. | 247 # of components, ignore this one. |
249 if component_path not in component_to_changelog_map: | 248 if component_path not in component_to_changelog_map: |
250 continue | 249 continue |
251 | 250 |
252 changelog = component_to_changelog_map[component_path] | 251 changelog = component_to_changelog_map[component_path] |
253 file_to_crash_info = component_dict.GetFileDict(component_path) | 252 file_to_crash_info = component_dict.GetFileDict(component_path) |
254 t = Thread( | 253 tasks.append({ |
255 target=FindMatchForComponent, | 254 'function': FindMatchForComponent, |
256 args=[component_path, file_to_crash_info, changelog, | 255 'args': [component_path, file_to_crash_info, changelog, |
257 callstack_priority, results, results_lock]) | 256 callstack_priority, results, results_lock] |
258 threads.append(t) | 257 }) |
259 t.start() | |
260 | 258 |
261 for t in threads: | 259 # Run all the tasks. |
262 t.join() | 260 crash_utils.RunTasks(tasks) |
263 | 261 |
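Since the tasks above may run concurrently, FindMatchForComponent presumably appends its findings to the shared results list only while holding results_lock, as its docstring suggests. A minimal sketch of that pattern, assuming a hypothetical helper name and tuple layout (the body of FindMatchForComponent is skipped in this diff):

from threading import Lock

results = []
results_lock = Lock()

def ReportComponentResult(component_path, callstack_priority, matches):
  # Hypothetical helper for illustration; the exact value appended by
  # FindMatchForComponent is not shown in this diff.
  with results_lock:
    results.append((callstack_priority, component_path, matches))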
264 | 262 |
265 def FindMatchForStacktrace(stacktrace, components, | 263 def FindMatchForStacktrace(stacktrace, components, |
266 component_to_regression_dict): | 264 component_to_regression_dict): |
267 """Finds the culprit CL for stacktrace. | 265 """Finds the culprit CL for stacktrace. |
268 | 266 |
269 The passed stacktrace is either from release build stacktrace | 267 The passed stacktrace is either from release build stacktrace |
270 or debug build stacktrace. | 268 or debug build stacktrace. |
271 | 269 |
272 Args: | 270 Args: |
(...skipping 40 matching lines...) |
313 # If the returned map from ParseChangeLog is empty, we don't need to look | 311 # If the returned map from ParseChangeLog is empty, we don't need to look |
314 # further because either the parsing failed or the changelog is empty. | 312 # further because either the parsing failed or the changelog is empty. |
315 if not (revisions and file_to_revision_map): | 313 if not (revisions and file_to_revision_map): |
316 continue | 314 continue |
317 | 315 |
318 component_to_changelog_map[component_path] = (repository_parser, | 316 component_to_changelog_map[component_path] = (repository_parser, |
319 component_name, | 317 component_name, |
320 revisions, | 318 revisions, |
321 file_to_revision_map) | 319 file_to_revision_map) |
322 | 320 |
| 321 tasks = [] |
323 # Create a separate thread for each call stack in the stacktrace. | 322 # Create a task for each call stack in the stacktrace. |
324 threads = [] | |
325 for callstack in stacktrace.stack_list: | 323 for callstack in stacktrace.stack_list: |
326 t = Thread( | 324 tasks.append({ |
327 target=FindMatchForCallstack, | 325 'function': FindMatchForCallstack, |
328 args=[callstack, components, component_to_changelog_map, | 326 'args': [callstack, components, component_to_changelog_map, |
329 results, results_lock]) | 327 results, results_lock] |
330 threads.append(t) | 328 }) |
331 t.start() | |
332 | 329 |
333 for t in threads: | 330 # Run all the tasks. |
334 t.join() | 331 crash_utils.RunTasks(tasks) |
335 | 332 |
336 return results | 333 return results |
337 | 334 |
338 | 335 |
339 def SortMatchesFunction(match_with_stack_priority): | 336 def SortMatchesFunction(match_with_stack_priority): |
340 """A function to sort the match triple. | 337 """A function to sort the match triple. |
341 | 338 |
342 Currently, it sorts the list by: | 339 Currently, it sorts the list by: |
343 1) The highest priority file change in the CL (changing crashed line is | 340 1) The highest priority file change in the CL (changing crashed line is |
344 higher priority than just changing the file). | 341 higher priority than just changing the file). |
(...skipping 125 matching lines...) |
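The body of SortMatchesFunction is skipped above, so only criterion 1 is visible here. As a hedged sketch, a key function over the (stack_priority, cl, match) triples (the same layout iterated later in CombineMatches) could express that criterion through min_distance, where a distance of 0 means the CL touched the crashed line itself; the field choices below are assumptions, not the actual sort order:

def SortMatchesKeySketch(match_with_stack_priority):
  # Hypothetical key: a smaller min_distance (change closer to the crashed
  # line) and a higher-priority stack should sort first.
  (stack_priority, cl, match) = match_with_stack_priority
  return (match.min_distance, stack_priority)

matches_to_sort.sort(key=SortMatchesKeySketch)  # matches_to_sort is hypothetical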
470 file_string = 'File %s is changed in this cl ' | 467 file_string = 'File %s is changed in this cl ' |
471 else: | 468 else: |
472 file_string = 'Files %s are changed in this cl ' | 469 file_string = 'Files %s are changed in this cl ' |
473 | 470 |
474 # Create a list of file names, and prettify the list. | 471 # Create a list of file names, and prettify the list. |
475 file_names = [ | 472 file_names = [ |
476 file_name for (_, _, file_name, _, _) in rest_of_the_files] | 473 file_name for (_, _, file_name, _, _) in rest_of_the_files] |
477 pretty_file_names = crash_utils.PrettifyList(file_names) | 474 pretty_file_names = crash_utils.PrettifyList(file_names) |
478 | 475 |
479 # Add the reason, break because we took care of the rest of the files. | 476 # Add the reason, break because we took care of the rest of the files. |
480 file_string += '(%s)' % crash_utils.PrettifyFrameInfo( | 477 file_string += ('(and is part of stack %s)' % |
481 stack_frame_indices, function_list) | 478 crash_utils.PrettifyFrameInfo(stack_frame_indices, function_list)) |
482 reason.append(file_string % pretty_file_names) | 479 reason.append(file_string % pretty_file_names) |
483 break | 480 break |
484 | 481 |
485 # Set the reason as string. | 482 # Set the reason as string. |
486 match.reason = '\n'.join(reason) | 483 match.reason = '\n'.join(reason) |
487 | 484 |
488 | 485 |
489 def CombineMatches(matches): | 486 def CombineMatches(matches): |
490 """Combine possible duplicates in matches. | 487 """Combine possible duplicates in matches. |
491 | 488 |
(...skipping 23 matching lines...) |
515 # Combine the reason if the current match is already in there. | 512 # Combine the reason if the current match is already in there. |
516 found_match.reason += match.reason | 513 found_match.reason += match.reason |
517 if match.min_distance < found_match.min_distance: | 514 if match.min_distance < found_match.min_distance: |
518 found_match.min_distance = match.min_distance | 515 found_match.min_distance = match.min_distance |
519 found_match.min_distance_info = match.min_distance_info | 516 found_match.min_distance_info = match.min_distance_info |
520 | 517 |
521 for stack_index, cl, match in combined_matches: | 518 for stack_index, cl, match in combined_matches: |
522 if match.min_distance_info: | 519 if match.min_distance_info: |
523 file_name, min_crashed_line, min_changed_line = match.min_distance_info | 520 file_name, min_crashed_line, min_changed_line = match.min_distance_info |
524 match.reason += \ | 521 match.reason += \ |
525 ('Minimum distance from crashed line to changed line: %d. ' | 522 ('\nMinimum distance from crash line to modified line: %d. ' |
526 '(File: %s, Crashed on: %d, Changed: %d).\n' % | 523 '(file: %s, crashed on: %d, modified: %d).\n' % |
527 (match.min_distance, file_name, min_crashed_line, min_changed_line)) | 524 (match.min_distance, file_name, min_crashed_line, min_changed_line)) |
528 | 525 |
529 return combined_matches | 526 return combined_matches |
530 | 527 |
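For illustration, here is the merge step from the loop above applied to two hypothetical match objects that point at the same CL; FakeMatch is a stand-in defined only for this example (the real match objects come from match_set and carry more fields):

class FakeMatch(object):
  def __init__(self, reason, min_distance, min_distance_info):
    self.reason = reason
    self.min_distance = min_distance
    # min_distance_info is (file_name, min_crashed_line, min_changed_line).
    self.min_distance_info = min_distance_info

found_match = FakeMatch('Lines 10-12 of a.cc are changed. ', 5, ('a.cc', 15, 10))
match = FakeMatch('Line 20 of a.cc is changed. ', 0, ('a.cc', 20, 20))

# The same merge steps as in CombineMatches: concatenate the reasons and keep
# the smaller crashed-line distance along with its info.
found_match.reason += match.reason
if match.min_distance < found_match.min_distance:
  found_match.min_distance = match.min_distance
  found_match.min_distance_info = match.min_distance_info

# found_match.min_distance is now 0 and min_distance_info is ('a.cc', 20, 20).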
531 | 528 |
532 def FilterAndGenerateReasonForMatches(result): | 529 def FilterAndGenerateReasonForMatches(result): |
533 """A wrapper function. | 530 """A wrapper function. |
534 | 531 |
535 It generates reasons for the matches and returns string representation | 532 It generates reasons for the matches and returns string representation |
536 of filtered results. | 533 of filtered results. |
(...skipping 124 matching lines...) |
661 return (return_message, filtered_result) | 658 return (return_message, filtered_result) |
662 | 659 |
663 # If no match is found, return the blame information for the input | 660 # If no match is found, return the blame information for the input |
664 # callstack. | 661 # callstack. |
665 result = GenerateAndFilterBlameList( | 662 result = GenerateAndFilterBlameList( |
666 callstack, component_to_crash_revision_dict, | 663 callstack, component_to_crash_revision_dict, |
667 component_to_regression_dict) | 664 component_to_regression_dict) |
668 | 665 |
669 if result: | 666 if result: |
670 return_message = ( | 667 return_message = ( |
671 'No CL in the regression changes the crashed files. The result is ' | 668 'No CL in the regression range changes the crashed files. ' |
672 'the blame information.') | 669 'The result is the blame information.') |
673 | 670 |
674 # When Findit could not find any CL that changes a file in the stacktrace, or | 671 # When Findit could not find any CL that changes a file in the stacktrace, or |
675 # if it cannot get any blame information, return a message saying that no | 672 # if it cannot get any blame information, return a message saying that no |
676 # results are available. | 673 # results are available. |
677 else: | 674 else: |
678 return_message = ('Findit could not find any suspected CLs.') | 675 return_message = ('Findit could not find any suspected CLs.') |
679 | 676 |
680 return (return_message, result) | 677 return (return_message, result) |
681 | 678 |