OLD | NEW |
---|---|
(Empty) | |
1 #!/usr/bin/python | |
2 # Copyright (c) 2012 The Native Client Authors. All rights reserved. | |
3 # Use of this source code is governed by a BSD-style license that can be | |
4 # found in the LICENSE file. | |
5 | |
6 import codecs | |
7 import hashlib | |
8 import json | |
9 import math | |
10 import os | |
11 import shutil | |
12 import struct | |
13 import subprocess | |
14 import sys | |
15 import threading | |
16 import time | |
17 import zipfile | |
18 | |
19 SCRIPT_DIR = os.path.dirname(os.path.abspath(__file__)) | |
20 TESTS_DIR = os.path.dirname(SCRIPT_DIR) | |
21 NACL_DIR = os.path.dirname(TESTS_DIR) | |
22 | |
23 # Imports from the build directory. | |
24 sys.path.insert(0, os.path.join(NACL_DIR, 'build')) | |
25 from download_utils import RemoveDir | |
26 | |
27 | |
class DownloadError(Exception):
  """Raised when a download from cloud storage ultimately fails."""
31 | |
32 | |
class FailedTests(Exception):
  """Raised when one or more tests in a run failed."""
36 | |
37 | |
def GsutilCopySilent(src, dst):
  """Invoke gsutil cp, swallowing the output, with retry.

  Args:
    src: src url.
    dst: dst path.
  Raises:
    DownloadError: if all attempts fail.
  """
  env = os.environ.copy()
  # Make sure the bots' copy of gsutil is found first.
  env['PATH'] = '/b/build/scripts/slave' + os.pathsep + env['PATH']
  # Retry to compensate for storage flake.
  for attempt in range(3):
    process = subprocess.Popen(
        ['gsutil', 'cp', src, dst],
        env=env, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
    process_stdout, process_stderr = process.communicate()
    if process.returncode == 0:
      return
    # Exponential backoff (10s then 20s). Do not sleep after the final
    # attempt -- the old code slept 40s before raising, for no benefit.
    if attempt < 2:
      time.sleep(math.pow(2, attempt + 1) * 5)
  raise DownloadError(
      'Unexpected return code: %s\n'
      '>>> STDOUT\n%s\n'
      '>>> STDERR\n%s\n' % (
          process.returncode, process_stdout, process_stderr))
61 | |
62 | |
def DownloadCorpusCRXList(list_filename):
  """Download list of all crx files in test corpus.

  Args:
    list_filename: destination filename (kept around for debugging).
  Returns:
    List of CRXs.
  """
  DownloadFileFromCorpus('naclapps.all', list_filename)
  # Use a context manager so the handle is closed even if read() raises
  # (the old code leaked the handle on error).
  with open(list_filename) as fh:
    filenames = fh.read().splitlines()
  return [f for f in filenames if f.endswith('.crx')]
77 | |
78 | |
def DownloadNexeList(filename):
  """Download list of NEXEs.

  Args:
    filename: destination filename.
  Returns:
    List of NEXEs.
  """
  # Go through DownloadFileFromCorpus so the bucket location lives in one
  # place (requested in review: "Why not DownloadFileFromCorpus?").
  DownloadFileFromCorpus('naclapps.list', filename)
  # Close the handle deterministically even if read() raises.
  with open(filename) as fh:
    return fh.read().splitlines()
92 | |
93 | |
def DownloadFileFromCorpus(src_path, dst_filename):
  """Fetch one file from our corpus snapshot in cloud storage.

  Args:
    src_path: datastore relative path to download from.
    dst_filename: destination filename.
  """
  url = 'gs://nativeclient-snaps/%s' % src_path
  GsutilCopySilent(url, dst_filename)
102 | |
103 | |
def Sha1Digest(path):
  """Determine the sha1 hash of a file's contents given its path.

  Args:
    path: file to hash.
  Returns:
    Hex sha1 digest of the file's contents.
  """
  m = hashlib.sha1()
  # Context manager guarantees the handle is closed even if read() raises
  # (the old code leaked it on error).
  with open(path, 'rb') as fh:
    m.update(fh.read())
  return m.hexdigest()
111 | |
112 | |
def Hex2Alpha(ch):
  """Map one hexadecimal digit (0-9 / a-f) onto the letters a-p.

  Args:
    ch: a character in 0-9 / a-f.
  Returns:
    A character in a-p.
  """
  if '0' <= ch <= '9':
    # Digits 0-9 land on a-j.
    return chr(ord('a') + ord(ch) - ord('0'))
  # Letters a-f land on k-p.
  return chr(ord(ch) + 10)
125 | |
126 | |
def ChromeAppIdFromPath(path):
  """Convert a path to the corresponding chrome app id.

  A stable but semi-undocumented property of unpacked chrome extensions is
  that they are assigned an app-id based on the first 32 characters of the
  sha256 digest of the absolute symlink expanded path of the extension.
  Instead of hexadecimal digits, characters a-p.
  From discussion with webstore team + inspection of extensions code.
  Args:
    path: Path to an unpacked extension.
  Returns:
    A 32 character chrome extension app id.
  """
  digest = hashlib.sha256()
  digest.update(os.path.realpath(path))
  # Keep the first 32 hex characters, then remap each digit into a-p.
  return ''.join(Hex2Alpha(ch) for ch in digest.hexdigest()[:32])
144 | |
145 | |
def RunWithTimeout(cmd, timeout):
  """Run a program, capture output, allowing it to run up to a timeout.

  Args:
    cmd: List of strings containing command to run.
    timeout: Maximum duration to allow, in seconds.
  Returns:
    Tuple of stdout, stderr, returncode.
  """
  process = subprocess.Popen(cmd,
                             stdout=subprocess.PIPE,
                             stderr=subprocess.PIPE)
  # Put the reads in other threads so the pipe buffers don't fill up.
  def GatherOutput(fh, dst):
    dst.append(fh.read())
  # Gather stdout.
  stdout_output = []
  stdout_thread = threading.Thread(
      target=GatherOutput, args=(process.stdout, stdout_output))
  stdout_thread.start()
  # Gather stderr.
  stderr_output = []
  stderr_thread = threading.Thread(
      target=GatherOutput, args=(process.stderr, stderr_output))
  stderr_thread.start()
  # Poll so we return as soon as the process exits on its own, instead of
  # always sleeping the full timeout as the old code did.
  deadline = time.time() + timeout
  while time.time() < deadline and process.poll() is None:
    time.sleep(0.1)
  if process.poll() is None:
    try:
      process.kill()
    except OSError:
      # Lost the race: the process exited between poll() and kill().
      pass
  # Join up.
  process.wait()
  stdout_thread.join()
  stderr_thread.join()
  # Pick out result.
  return stdout_output[0], stderr_output[0], process.returncode
180 | |
181 | |
def LoadManifest(app_path):
  """Load and parse an unpacked extension's manifest.json.

  Args:
    app_path: path to the unpacked extension directory.
  Returns:
    The parsed manifest (a dict).
  """
  # Close the handle deterministically (the old code leaked it).
  fh = codecs.open(os.path.join(app_path, 'manifest.json'),
                   'r', encoding='utf-8')
  try:
    manifest_data = fh.read()
  finally:
    fh.close()
  # Ignore CRs as they confuse json.loads.
  manifest_data = manifest_data.replace('\r', '')
  # Ignore unicode endian markers as they confuse json.loads.
  # NOTE(review): u'\uffee' looks like it was meant to be the byte-swapped
  # BOM u'\ufffe' -- kept as-is, confirm intent before changing.
  manifest_data = manifest_data.replace(u'\ufeff', '')
  manifest_data = manifest_data.replace(u'\uffee', '')
  return json.loads(manifest_data)
191 | |
192 | |
def CachedPath(cache_dir, filename):
  """Map a cache-relative filename to its absolute path in the cache.

  Args:
    cache_dir: directory to keep the cache in.
    filename: filename relative to the top of the download url / cache.
  Returns:
    Absolute path of where the file goes in the cache.
  """
  # All cached items live under a single well-known subdirectory.
  return os.path.join(cache_dir, 'nacl_abi_corpus_cache', filename)
203 | |
204 | |
def Sha1FromFilename(filename):
  """Get the expected sha1 of a file path.

  Throughout we use the convention that files are stored to a name of the
  form:
    <path_to_file>/<sha1hex>[.<some_extension>]
  This function extracts the expected sha1.

  Args:
    filename: filename to extract.
  Returns:
    Expected sha1.
  """
  base = os.path.basename(filename)
  return os.path.splitext(base)[0]
218 | |
219 | |
def PrimeCache(cache_dir, filename):
  """Attempt to add a file to the cache directory if it's not already there.

  Args:
    cache_dir: directory to keep the cache in.
    filename: filename relative to the top of the download url / cache.
  """
  dpath = CachedPath(cache_dir, filename)
  # Re-download when the file is absent or its contents don't hash to the
  # sha1 embedded in its name.
  if (not os.path.exists(dpath) or
      Sha1Digest(dpath) != Sha1FromFilename(filename)):
    # Create the containing directory. BUG FIX: the old code passed
    # os.path.basename(dpath), which created a directory named after the
    # file in the current directory instead of the cache subdirectory.
    # Failure is ok; let the download fail instead.
    try:
      os.makedirs(os.path.dirname(dpath))
    except OSError:
      pass
    DownloadFileFromCorpus(filename, dpath)
236 | |
237 | |
def CopyFromCache(cache_dir, filename, dest_filename):
  """Copy an item out of the cache, verifying its digest.

  Args:
    cache_dir: directory to keep the cache in.
    filename: filename relative to the top of the download url / cache.
    dest_filename: location to copy the file to.
  """
  expected_sha1 = Sha1FromFilename(filename)
  shutil.copy(CachedPath(cache_dir, filename), dest_filename)
  # The copy must hash to the sha1 embedded in its name.
  assert Sha1Digest(dest_filename) == expected_sha1
249 | |
250 | |
def ExtractFromCache(cache_dir, source, dest):
  """Extract a crx from the cache.

  Args:
    cache_dir: directory to keep the cache in.
    source: crx file to extract (cache relative).
    dest: location to extract to.
  Raises:
    Exception: on bad preconditions or unsafe archive member paths.
  """
  # We don't want to accidentally extract two extensions on top of each
  # other, so require that the destination does not yet exist.
  # Raise rather than assert: asserts are compiled out under -O and these
  # are (potentially) security checks (review feedback).
  if os.path.exists(dest):
    raise Exception('Extraction destination already exists: %s' % dest)
  dpath = CachedPath(cache_dir, source)
  # The cached copy must exist before we can extract it.
  if not os.path.exists(dpath):
    raise Exception('Missing cached file: %s' % dpath)
  zf = zipfile.ZipFile(dpath, 'r')
  try:
    os.makedirs(dest)
    for info in zf.infolist():
      # Skip directories.
      if info.filename.endswith('/'):
        continue
      # Do not support absolute paths or paths containing ..
      if os.path.isabs(info.filename) or '..' in info.filename:
        raise Exception('Unsafe archive member path: %s' % info.filename)
      tpath = os.path.join(dest, info.filename)
      tdir = os.path.dirname(tpath)
      if not os.path.exists(tdir):
        os.makedirs(tdir)
      zf.extract(info, dest)
  finally:
    # Close the archive even if extraction fails (old code leaked it).
    zf.close()
279 | |
280 | |
def DefaultCacheDirectory():
  """Decide a default cache directory.

  Preference order: /b (for the bots), then scons-out, then the current
  user's home directory.
  Returns:
    Default to use for a corpus cache directory.
  """
  for candidate in ('/b', os.path.join(NACL_DIR, 'scons-out')):
    if os.path.isdir(candidate):
      cache_dir = candidate
      break
  else:
    cache_dir = os.path.expanduser('~/')
  cache_dir = os.path.realpath(cache_dir)
  assert os.path.isdir(cache_dir)
  # Refuse to cache into the current directory.
  assert os.path.realpath('.') != cache_dir
  return cache_dir
300 | |
301 | |
def NexeArchitecture(filename):
  """Decide the architecture of a nexe.

  Args:
    filename: filename of the nexe.
  Returns:
    Architecture string (x86-32 / x86-64) or None.
  """
  # Close the handle deterministically (the old code leaked it).
  fh = open(filename, 'rb')
  try:
    head = fh.read(20)
  finally:
    fh.close()
  # Must not be too short.
  if len(head) != 20:
    print('ERROR - header too short')
    return None
  # Must have ELF header.
  if head[0:4] != b'\x7fELF':
    print('ERROR - no elf header')
    return None
  # Decode e_machine: little-endian uint16 at offset 18 of the ELF header.
  machine = struct.unpack('<H', head[18:])[0]
  return {
      3: 'x86-32',
      #40: 'arm', # TODO(bradnelson): handle arm.
      62: 'x86-64',
  }.get(machine)
327 | |
328 | |
class Progress(object):
  """Track progress and outcomes of a test run, emitting bot annotations."""

  def __init__(self, total):
    self.total = total      # Expected number of items.
    self.count = 0          # Items started (via Tally).
    self.successes = 0
    self.failures = 0
    self.start = time.time()

  def Tally(self):
    """Note that another item is starting; print progress with an ETA."""
    if self.count > 0:
      tm = time.time()
      # Estimate remaining time from the average per-item duration so far.
      eta = (self.total - self.count) * (tm - self.start) / self.count
      eta_minutes = int(eta / 60)
      eta_seconds = int(eta - eta_minutes * 60)
      eta_str = ' (ETA %d:%02d)' % (eta_minutes, eta_seconds)
    else:
      eta_str = ''
    self.count += 1
    print('Processing %d of %d%s...' % (self.count, self.total, eta_str))

  def Result(self, success):
    """Record the outcome of one item."""
    if success:
      self.successes += 1
    else:
      self.failures += 1

  def Summary(self, warn_only=False):
    """Print a run summary; raise FailedTests on failures unless warn_only.

    Args:
      warn_only: if True, emit a bot warning annotation instead of raising.
    Raises:
      FailedTests: when there were failures and warn_only is False.
    """
    print('Ran tests on %d of %d items.' % (
        self.successes + self.failures, self.total))
    if self.failures:
      # Our alternate validators don't currently cover everything.
      # For now, don't fail just emit warning (and a tally of failures).
      # BUG FIX: use 100.0 to force true division -- under Python 2 the old
      # integer division truncated the percentage despite the %.1f format.
      print('@@@STEP_TEXT@FAILED %d times (%.1f%% are incorrect)@@@' % (
          self.failures,
          self.failures * 100.0 / (self.successes + self.failures)))
      if warn_only:
        print('@@@STEP_WARNINGS@@@')
      else:
        raise FailedTests('FAILED %d tests' % self.failures)
    else:
      print('SUCCESS')
OLD | NEW |