OLD | NEW |
---|---|
1 # Copyright (c) 2014 The Chromium Authors. All rights reserved. | 1 # Copyright (c) 2014 The Chromium Authors. All rights reserved. |
2 # Use of this source code is governed by a BSD-style license that can be | 2 # Use of this source code is governed by a BSD-style license that can be |
3 # found in the LICENSE file. | 3 # found in the LICENSE file. |
4 | 4 |
5 import cgi | 5 import cgi |
6 import ConfigParser | 6 import ConfigParser |
7 import json | 7 import json |
8 import logging | 8 import logging |
9 import os | 9 import os |
10 import time | 10 import time |
11 import urllib2 | 11 import urllib2 |
12 | 12 |
13 from common import utils | |
13 from result import Result | 14 from result import Result |
14 | 15 |
15 | 16 |
16 INFINITY = float('inf') | 17 INFINITY = float('inf') |
17 | 18 |
18 | 19 |
19 def ParseURLsFromConfig(file_name): | 20 def ParseURLsFromConfig(file_name): |
20 """Parses URLS from the config file. | 21 """Parses URLS from the config file. |
21 | 22 |
22 The file should be in python config format, where svn section is in the | 23 The file should be in python config format, where svn section is in the |
(...skipping 52 matching lines...) | |
75 type_to_url = url_map_for_repository[component_path] | 76 type_to_url = url_map_for_repository[component_path] |
76 | 77 |
77 # Add all URLs to this map. | 78 # Add all URLs to this map. |
78 for option in config.options(section): | 79 for option in config.options(section): |
79 url = config.get(section, option) | 80 url = config.get(section, option) |
80 type_to_url[option] = url | 81 type_to_url[option] = url |
81 | 82 |
82 return repository_type_to_url_map | 83 return repository_type_to_url_map |
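For context, here is a minimal standalone sketch of how the visible loop assembles the nested repository_type -> component_path -> option -> URL map. The section-naming convention lives in the skipped lines above, so the section.split(None, 1) step below is purely a hypothetical stand-in, not the code under review:

    import ConfigParser

    def ParseURLsFromConfigSketch(file_name):
      """Illustrative sketch only; not the reviewed implementation."""
      config = ConfigParser.ConfigParser()
      repository_type_to_url_map = {}
      if not config.read(file_name):
        return None
      for section in config.sections():
        # Assumed (hypothetical) convention: '<repository_type> <component_path>'.
        repository_type, component_path = section.split(None, 1)
        url_map_for_repository = repository_type_to_url_map.setdefault(
            repository_type, {})
        type_to_url = url_map_for_repository.setdefault(component_path, {})
        # Add all URLs for this component, keyed by option name.
        for option in config.options(section):
          type_to_url[option] = config.get(section, option)
      return repository_type_to_url_map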
83 | 84 |
84 | 85 |
85 def NormalizePathLinux(path, parsed_deps): | 86 def NormalizePathLinux(path, parsed_deps): |
Martin Barbella 2014/08/15 23:00:02
Is this being called directly at the moment? There
86 """Normalizes linux path. | 87 """Normalizes linux path. |
87 | 88 |
88 Args: | 89 Args: |
89 path: A string representing a path. | 90 path: A string representing a path. |
90 parsed_deps: A map from component path to its component name, repository, | 91 parsed_deps: A map from component path to its component name, repository, |
91 etc. | 92 etc. |
92 | 93 |
93 Returns: | 94 Returns: |
94 A tuple containing a component this path is in (e.g blink, skia, etc) | 95 A tuple containing a component this path is in (e.g blink, skia, etc) |
95 and a path in that component's repository. | 96 and a path in that component's repository. |
96 """ | 97 """ |
97 # First normalize the path by retrieving the absolute path. | 98 # First normalize the path by retrieving the absolute path. |
98 normalized_path = os.path.abspath(path) | 99 normalized_path = os.path.abspath(path) |
99 | 100 |
100 # Iterate through all component paths in the parsed DEPS, in the decreasing | 101 # Iterate through all component paths in the parsed DEPS, in the decreasing |
101 # order of the length of the file path. | 102 # order of the length of the file path. |
102 for component_path in sorted(parsed_deps, | 103 for component_path in sorted(parsed_deps, |
103 key=(lambda path: -len(path))): | 104 key=(lambda path: -len(path))): |
104 # New_path is the component path with 'src/' removed. | 105 # New_path is the component path with 'src/' removed. |
105 new_path = component_path | 106 new_path = component_path |
106 if new_path.startswith('src/') and new_path != 'src/': | 107 if new_path.startswith('src/') and new_path != 'src/': |
107 new_path = new_path[len('src/'):] | 108 new_path = new_path[len('src/'):] |
108 | 109 |
109 # If this path is part of the file path, this file must be from this | 110 # If this path is part of the file path, this file must be from this |
110 # component. | 111 # component. |
111 if new_path in normalized_path: | 112 if new_path in normalized_path: |
112 | 113 |
113 # Currently does not support googlecode. | 114 # Currently does not support googlecode. |
114 if 'googlecode' in parsed_deps[component_path]['repository']: | 115 if 'googlecode' in parsed_deps[component_path]['repository']: |
115 return (None, '', '') | 116 return (None, '', '') |
Martin Barbella 2014/08/15 23:00:02
Do the callees of this handle this case properly?
116 | 117 |
117 # Normalize the path by stripping everything off the component's relative | 118 # Normalize the path by stripping everything off the component's relative |
118 # path. | 119 # path. |
119 normalized_path = normalized_path.split(new_path,1)[1] | 120 normalized_path = normalized_path.split(new_path,1)[1] |
120 | 121 |
121 # Add 'src/' or 'Source/' at the front of the normalized path, depending | 122 # Add 'src/' or 'Source/' at the front of the normalized path, depending |
122 # on what prefix the component path uses. For example, blink uses | 123 # on what prefix the component path uses. For example, blink uses |
123 # 'Source' but chromium uses 'src/', and blink component path is | 124 # 'Source' but chromium uses 'src/', and blink component path is |
124 # 'src/third_party/WebKit/Source', so add 'Source/' in front of the | 125 # 'src/third_party/WebKit/Source', so add 'Source/' in front of the |
125 # normalized path. | 126 # normalized path. |
(...skipping 63 matching lines...) | |
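The prefix handling ('src/' vs. 'Source/') and the final return shape fall in the skipped lines, but the matching step shown above is self-contained enough to sketch on its own. The fallback return below is an assumption of this sketch, not the code under review:

    import os

    def MatchComponentPath(path, parsed_deps):
      """Illustrative sketch of the longest-match step in NormalizePathLinux."""
      normalized_path = os.path.abspath(path)
      # Longest component paths are tried first, so the most specific match wins.
      for component_path in sorted(parsed_deps, key=lambda p: -len(p)):
        new_path = component_path
        if new_path.startswith('src/') and new_path != 'src/':
          new_path = new_path[len('src/'):]
        if new_path in normalized_path:
          # Strip everything up to and including the component's relative path.
          return component_path, normalized_path.split(new_path, 1)[1]
      return None, normalized_path  # Hypothetical fallback; the real one is elided.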
189 | 190 |
190 Args: | 191 Args: |
191 url: URL to get data from. | 192 url: URL to get data from. |
192 retries: Number of times to retry connection. | 193 retries: Number of times to retry connection. |
193 sleep_time: Time in seconds to wait before retrying connection. | 194 sleep_time: Time in seconds to wait before retrying connection. |
194 timeout: Time in seconds to wait before time out. | 195 timeout: Time in seconds to wait before time out. |
195 | 196 |
196 Returns: | 197 Returns: |
197 None if the data retrieval fails, or the raw data. | 198 None if the data retrieval fails, or the raw data. |
198 """ | 199 """ |
199 data = None | 200 count = 0 |
200 for i in range(retries): | 201 while True: |
202 count += 1 | |
201 # Retrieves data from URL. | 203 # Retrieves data from URL. |
202 try: | 204 try: |
203 data = urllib2.urlopen(url, timeout=timeout) | 205 _, data = utils.GetHttpClient().Get(url) |
204 | 206 return data |
205 # If retrieval is successful, return the data. | |
206 if data: | |
207 return data.read() | |
208 | |
209 # If retrieval fails, try after sleep_time second. | |
210 except urllib2.URLError: | |
211 time.sleep(sleep_time) | |
212 continue | |
213 except IOError: | 207 except IOError: |
214 time.sleep(sleep_time) | 208 if count < retries: |
215 continue | 209 # If retrieval fails, try after sleep_time second. |
210 time.sleep(sleep_time) | |
211 else: | |
212 break | |
216 | 213 |
217 # Return None if it fails to read data from URL 'retries' times. | 214 # Return None if it fails to read data from URL 'retries' times. |
218 return None | 215 return None |
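The rewritten loop above routes the fetch through the project's utils.GetHttpClient(); a dependency-free sketch of the same retry-with-sleep pattern, with the fetch callable supplied by the caller and the retries/sleep_time semantics documented above, looks like this:

    import time

    def FetchWithRetries(fetch, url, retries, sleep_time):
      """Sketch of the retry loop; fetch(url) may raise IOError."""
      count = 0
      while True:
        count += 1
        try:
          return fetch(url)
        except IOError:
          if count < retries:
            # If retrieval fails, wait sleep_time seconds and try again.
            time.sleep(sleep_time)
          else:
            break
      # Give up after 'retries' failed attempts.
      return None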
219 | 216 |
220 | 217 |
221 def FindMinLineDistance(crashed_line_list, changed_line_numbers): | 218 def FindMinLineDistance(crashed_line_list, changed_line_numbers): |
222 """Calculates how far the changed line is from one of the crashes. | 219 """Calculates how far the changed line is from one of the crashes. |
223 | 220 |
224 Finds the minimum distance between the lines that the file crashed on | 221 Finds the minimum distance between the lines that the file crashed on |
225 and the lines that the file changed. For example, if the file crashed on | 222 and the lines that the file changed. For example, if the file crashed on |
(...skipping 202 matching lines...) | |
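The body of FindMinLineDistance sits in the skipped region; a plain reading of its docstring suggests something along these lines, where returning INFINITY for empty inputs is an assumption of this sketch:

    INFINITY = float('inf')  # Mirrors the module-level constant defined above.

    def MinLineDistanceSketch(crashed_line_list, changed_line_numbers):
      """Illustrative sketch; not the reviewed implementation."""
      min_distance = INFINITY
      for crashed_line in crashed_line_list:
        for changed_line in changed_line_numbers:
          min_distance = min(min_distance, abs(crashed_line - changed_line))
      return min_distance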
428 A list of result objects. | 425 A list of result objects. |
429 """ | 426 """ |
430 result_list = [] | 427 result_list = [] |
431 | 428 |
432 for blame in blame_list: | 429 for blame in blame_list: |
433 suspected_cl = blame.revision | 430 suspected_cl = blame.revision |
434 revision_url = blame.url | 431 revision_url = blame.url |
435 component_name = blame.component_name | 432 component_name = blame.component_name |
436 author = blame.author | 433 author = blame.author |
437 reason = ( | 434 reason = ( |
438 'The CL changes line %s of file %s from stack %d.' % | 435 'The CL changes line %s of file %s from stack %d.' % |
Martin Barbella 2014/08/15 23:00:02
If we are planning on using this output directly a
439 (blame.line_number, blame.file, blame.stack_frame_index)) | 436 (blame.line_number, blame.file, blame.stack_frame_index)) |
440 # Blame object does not have review url and reviewers. | 437 # Blame object does not have review url and reviewers. |
441 review_url = None | 438 review_url = None |
442 reviewers = None | 439 reviewers = None |
443 line_content = blame.content | 440 line_content = blame.content |
444 | 441 |
445 result = Result(suspected_cl, revision_url, component_name, author, reason, | 442 result = Result(suspected_cl, revision_url, component_name, author, reason, |
446 review_url, reviewers, line_content) | 443 review_url, reviewers, line_content) |
447 result_list.append(result) | 444 result_list.append(result) |
448 | 445 |
449 return result_list | 446 return result_list |
450 | 447 |
451 | 448 |
452 def ResultListToJSON(result_list): | 449 def ResultListToJSON(result_list): |
453 """Converts result list to JSON format. | 450 """Converts result list to JSON format. |
454 | 451 |
455 Args: | 452 Args: |
456 result_list: A list of result objects | 453 result_list: A list of result objects |
457 | 454 |
458 Returns: | 455 Returns: |
459 A string, JSON format of the result_list. | 456 A string, JSON format of the result_list. |
460 | 457 |
461 """ | 458 """ |
462 return json.dumps([result.ToDictionary() for result in result_list]) | 459 return json.dumps([result.ToDictionary() for result in result_list]) |
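For reference, a small hypothetical usage of ResultListToJSON with the Result constructor shown above; every field value here is made up:

    # All values below are hypothetical; Result and ResultListToJSON come
    # from this module as shown in the diff.
    result = Result('abc123', 'http://example.com/r/abc123', 'chromium',
                    'someone@chromium.org',
                    'The CL changes line 10 of file a.cc from stack 0.',
                    None, None, 'line content here')
    print ResultListToJSON([result])  # JSON array containing one dictionary.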