OLD | NEW |
1 # Copyright (c) 2012 The Chromium Authors. All rights reserved. | 1 # Copyright (c) 2012 The Chromium Authors. All rights reserved. |
2 # Use of this source code is governed by a BSD-style license that can be | 2 # Use of this source code is governed by a BSD-style license that can be |
3 # found in the LICENSE file. | 3 # found in the LICENSE file. |
4 | 4 |
5 # These are fake fetchers that are used for testing and the preview server. | 5 # These are fake fetchers that are used for testing and the preview server. |
6 # They return canned responses for URLs. appengine_wrappers.py uses the fake | 6 # They return canned responses for URLs. appengine_wrappers.py uses the fake |
7 # fetchers if the App Engine imports fail. | 7 # fetchers if the App Engine imports fail. |
8 | 8 |
9 import os | 9 import os |
10 import re | 10 import re |
(...skipping 38 matching lines...) |
49 try: | 49 try: |
50 for f in self._ListDir(path): | 50 for f in self._ListDir(path): |
51 if f.startswith('.'): | 51 if f.startswith('.'): |
52 continue | 52 continue |
53 if self._IsDir(os.path.join(path, f)): | 53 if self._IsDir(os.path.join(path, f)): |
54 html.append('<a>' + f + '/</a>') | 54 html.append('<a>' + f + '/</a>') |
55 else: | 55 else: |
56 html.append('<a>' + f + '</a>') | 56 html.append('<a>' + f + '</a>') |
57 html.append('</html>') | 57 html.append('</html>') |
58 return '\n'.join(html) | 58 return '\n'.join(html) |
59 except OSError: | 59 except OSError as e: |
60 raise FileNotFoundError(path) | 60 raise FileNotFoundError('Listing %s failed: %s' % (path, e)) |
61 try: | 61 try: |
62 return self._ReadFile(path) | 62 return self._ReadFile(path) |
63 except IOError: | 63 except IOError as e: |
64 raise FileNotFoundError(path) | 64 raise FileNotFoundError('Reading %s failed: %s' % (path, e)) |
65 | 65 |
66 class FakeViewvcServer(_FakeFetcher): | 66 class FakeViewvcServer(_FakeFetcher): |
67 def __init__(self, base_path): | 67 def __init__(self, base_path): |
68 _FakeFetcher.__init__(self, base_path) | 68 _FakeFetcher.__init__(self, base_path) |
69 self._base_pattern = re.compile(r'.*chrome/common/extensions/(.*)') | 69 self._base_pattern = re.compile(r'.*chrome/common/extensions/(.*)') |
70 | 70 |
71 def fetch(self, url): | 71 def fetch(self, url): |
72 path = os.path.join(os.pardir, self._base_pattern.match(url).group(1)) | 72 path = os.path.join(os.pardir, self._base_pattern.match(url).group(1)) |
73 if self._IsDir(path): | 73 if self._IsDir(path): |
74 html = ['<table><tbody><tr>...</tr>'] | 74 html = ['<table><tbody><tr>...</tr>'] |
75 for f in self._ListDir(path): | 75 for f in self._ListDir(path): |
76 if f.startswith('.'): | 76 if f.startswith('.'): |
77 continue | 77 continue |
78 html.append('<tr>') | 78 html.append('<tr>') |
79 html.append(' <td><a>%s%s</a></td>' % ( | 79 html.append(' <td><a>%s%s</a></td>' % ( |
80 f, '/' if self._IsDir(os.path.join(path, f)) else '')) | 80 f, '/' if self._IsDir(os.path.join(path, f)) else '')) |
81 stat = self._Stat(os.path.join(path, f)) | 81 stat = self._Stat(os.path.join(path, f)) |
82 html.append(' <td><a><strong>%s</strong></a></td>' % stat) | 82 html.append(' <td><a><strong>%s</strong></a></td>' % stat) |
83 html.append('<td></td><td></td><td></td>') | 83 html.append('<td></td><td></td><td></td>') |
84 html.append('</tr>') | 84 html.append('</tr>') |
85 html.append('</tbody></table>') | 85 html.append('</tbody></table>') |
86 return '\n'.join(html) | 86 return '\n'.join(html) |
87 try: | 87 try: |
88 return self._ReadFile(path) | 88 return self._ReadFile(path) |
89 except IOError: | 89 except IOError as e: |
90 raise FileNotFoundError(path) | 90 raise FileNotFoundError('Reading %s failed: %s' % (path, e)) |
91 | 91 |
92 class FakeGithubStat(_FakeFetcher): | 92 class FakeGithubStat(_FakeFetcher): |
93 def fetch(self, url): | 93 def fetch(self, url): |
94 return '{ "commit": { "tree": { "sha": 0} } }' | 94 return '{ "commit": { "tree": { "sha": 0} } }' |
95 | 95 |
96 class FakeGithubZip(_FakeFetcher): | 96 class FakeGithubZip(_FakeFetcher): |
97 def fetch(self, url): | 97 def fetch(self, url): |
98 try: | 98 try: |
99 return self._ReadFile(os.path.join('server2', | 99 return self._ReadFile(os.path.join('server2', |
100 'test_data', | 100 'test_data', |
(...skipping 12 matching lines...) |
113 ''' | 113 ''' |
114 appengine_wrappers.ConfigureFakeUrlFetch({ | 114 appengine_wrappers.ConfigureFakeUrlFetch({ |
115 url_constants.OMAHA_PROXY_URL: FakeOmahaProxy(docs), | 115 url_constants.OMAHA_PROXY_URL: FakeOmahaProxy(docs), |
116 '%s/.*' % url_constants.SVN_URL: FakeSubversionServer(docs), | 116 '%s/.*' % url_constants.SVN_URL: FakeSubversionServer(docs), |
117 '%s/.*' % url_constants.VIEWVC_URL: FakeViewvcServer(docs), | 117 '%s/.*' % url_constants.VIEWVC_URL: FakeViewvcServer(docs), |
118 '%s/commits/.*' % url_constants.GITHUB_URL: FakeGithubStat(docs), | 118 '%s/commits/.*' % url_constants.GITHUB_URL: FakeGithubStat(docs), |
119 '%s/zipball' % url_constants.GITHUB_URL: FakeGithubZip(docs), | 119 '%s/zipball' % url_constants.GITHUB_URL: FakeGithubZip(docs), |
120 re.escape(url_constants.OPEN_ISSUES_CSV_URL): FakeIssuesFetcher(docs), | 120 re.escape(url_constants.OPEN_ISSUES_CSV_URL): FakeIssuesFetcher(docs), |
121 re.escape(url_constants.CLOSED_ISSUES_CSV_URL): FakeIssuesFetcher(docs) | 121 re.escape(url_constants.CLOSED_ISSUES_CSV_URL): FakeIssuesFetcher(docs) |
122 }) | 122 }) |
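
For context on the header comment: appengine_wrappers.py presumably decides between the real App Engine URL fetch service and these fakes with an import fallback along the lines sketched below. Only google.appengine.api.urlfetch is a real module here; the surrounding shape is an assumption, not the actual appengine_wrappers code.

  try:
    # Running on real App Engine: use the genuine URL fetch service.
    from google.appengine.api import urlfetch
  except ImportError:
    # Tests and the preview server land here; ConfigureFakeUrlFetch()
    # then installs the canned fetchers defined in this file.
    urlfetch = None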
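ConfigureFakeUrlFetch receives a dict mapping URL regexes to fetcher instances, so the dispatch it sets up plausibly looks like the sketch below: compile each pattern and hand a fetched URL to the first fake whose pattern matches. The class name and error message are hypothetical, not the actual appengine_wrappers API.

  import re

  class _FakeUrlFetch(object):
    def __init__(self, handlers):
      # handlers: dict of URL regex -> fake fetcher, as registered above.
      self._handlers = [(re.compile(pattern), fetcher)
                        for pattern, fetcher in handlers.items()]

    def fetch(self, url):
      # Delegate to the first registered fetcher whose pattern matches.
      for pattern, fetcher in self._handlers:
        if pattern.match(url):
          return fetcher.fetch(url)
      raise ValueError('No fake fetcher registered for %s' % url)

Since a Python 2 dict has no guaranteed iteration order, a dispatch like this relies on the registered patterns being non-overlapping, which the registrations above appear to be (distinct hosts, and disjoint /commits/ and /zipball paths under GITHUB_URL).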