Chromium Code Reviews| OLD | NEW |
|---|---|
| 1 #!/usr/bin/env python | 1 #!/usr/bin/env python |
| 2 # Copyright 2013 The LUCI Authors. All rights reserved. | 2 # Copyright 2013 The LUCI Authors. All rights reserved. |
| 3 # Use of this source code is governed under the Apache License, Version 2.0 | 3 # Use of this source code is governed under the Apache License, Version 2.0 |
| 4 # that can be found in the LICENSE file. | 4 # that can be found in the LICENSE file. |
| 5 | 5 |
| 6 """Archives a set of files or directories to an Isolate Server.""" | 6 """Archives a set of files or directories to an Isolate Server.""" |
| 7 | 7 |
| 8 __version__ = '0.4.9' | 8 __version__ = '0.4.9' |
| 9 | 9 |
import base64
import errno
import functools
import logging
import optparse
import os
import re
import signal
import stat
import sys
import tempfile
import threading
import time
import types
import zlib

import cStringIO as StringIO

from third_party import colorama
from third_party.depot_tools import fix_encoding
from third_party.depot_tools import subcommand

from libs import arfile
from utils import file_path
from utils import fs
from utils import logging_utils
from utils import lru
from utils import net
from utils import on_error
from utils import subprocess42
from utils import threading_utils
from utils import tools
| 38 | 42 |
| (...skipping 103 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... | |
| 142 """ | 146 """ |
| 143 file_path.ensure_tree(os.path.dirname(path)) | 147 file_path.ensure_tree(os.path.dirname(path)) |
| 144 total = 0 | 148 total = 0 |
| 145 with fs.open(path, 'wb') as f: | 149 with fs.open(path, 'wb') as f: |
| 146 for d in content_generator: | 150 for d in content_generator: |
| 147 total += len(d) | 151 total += len(d) |
| 148 f.write(d) | 152 f.write(d) |
| 149 return total | 153 return total |
| 150 | 154 |
| 151 | 155 |
def fileobj_path(fileobj):
  """Returns the file system path of a file object, if it has one.

  Returns None when the file object has no 'name' attribute or when the named
  path no longer exists on disk (e.g. a deleted temporary file).
  """
  name = getattr(fileobj, 'name', None)
  if name is None:
    return

  # On Python 2 'name' may be a str; normalize to unicode using the file
  # system encoding so the fs.* helpers behave consistently.
  if not isinstance(name, unicode):
    name = name.decode(sys.getfilesystemencoding())

  # The file may have been unlinked since being opened.
  if fs.exists(name):
    return name
| 166 | |
| 167 | |
def fileobj_copy(dstfileobj, srcfileobj, amount=-1):
  """Copies the content of one file object into another.

  Arguments:
    dstfileobj: file object to write to.
    srcfileobj: file object to read from.
    amount: number of bytes to copy, or -1 to copy until EOF.

  Raises:
    IOError: if the source ends before |amount| bytes were copied.
  """
  written = 0
  while written != amount:
    readsize = NET_IO_FILE_CHUNK
    if amount > 0:
      readsize = min(readsize, amount - written)
    data = srcfileobj.read(readsize)
    if not data:
      if amount == -1:
        break
      raise IOError('partial file, got %s, wanted %s' % (written, amount))
    dstfileobj.write(data)
    # A read may return fewer bytes than requested; count what was actually
    # transferred, not what was asked for, or a short read would make the
    # loop terminate early with a wrong total.
    written += len(data)
| 182 | |
| 183 | |
def putfile(srcfileobj, dstpath, file_mode=None, limit=-1):
  """Stores the content of |srcfileobj| at |dstpath|.

  When the source file object is backed by an on-disk file and the destination
  is to be read-only, a hardlink (with copy fallback) is used; otherwise the
  content is copied.

  Arguments:
    srcfileobj: file object to read the content from.
    dstpath: destination path to create.
    file_mode: mode bits to set on the destination, or None to leave default.
    limit: maximum number of bytes to copy when streaming, or -1 for no limit.
  """
  srcpath = fileobj_path(srcfileobj)
  if srcpath:
    # Only hardlink when the destination is read-only: a hardlink shares the
    # file node, so a writable destination would let the caller modify the
    # source (cache) file. The original expression was missing the 'not',
    # which inverted the decision.
    readonly = file_mode is None or not (
        file_mode & (stat.S_IWUSR | stat.S_IWGRP | stat.S_IWOTH))
    if readonly:
      # We can link the file.
      file_path.link_file(dstpath, srcpath, file_path.HARDLINK_WITH_FALLBACK)
    else:
      # We must copy the file.
      file_path.link_file(dstpath, srcpath, file_path.COPY)
  else:
    # No backing file; stream the content out.
    with fs.open(dstpath, 'wb') as dstfileobj:
      fileobj_copy(dstfileobj, srcfileobj, limit)

  # file_mode of 0 is actually valid, so need explicit check.
  if file_mode is not None:
    fs.chmod(dstpath, file_mode)
| 203 | |
| 204 | |
def zip_compress(content_generator, level=7):
  """Yields zlib-compressed chunks for the data read from |content_generator|.

  Arguments:
    content_generator: iterable of byte chunks to compress.
    level: zlib compression level (default 7).
  """
  deflator = zlib.compressobj(level)
  for piece in content_generator:
    compressed_piece = deflator.compress(piece)
    if compressed_piece:
      yield compressed_piece
  # Flush whatever is buffered in the compressor to finalize the stream.
  trailing = deflator.flush(zlib.Z_FINISH)
  if trailing:
    yield trailing
| (...skipping 1054 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... | |
| 1216 """ | 1269 """ |
| 1217 cache_dir = None | 1270 cache_dir = None |
| 1218 | 1271 |
  def __init__(self):
    """Initializes the lock and profiling counters shared by cache subclasses."""
    self._lock = threading_utils.LockWithAssert()
    # Profiling values.
    self._added = []    # Size of each item added via write().
    self._initial_number_items = 0  # Item count at load time; set by subclasses.
    self._initial_size = 0          # Total size at load time; set by subclasses.
    self._evicted = []  # Items evicted via evict().
    self._used = []     # Size of each item handed out via getfileobj().
| 1227 | 1280 |
  def __contains__(self, digest):
    """Returns True if the cache holds an item with the given hash digest."""
    raise NotImplementedError()
| 1230 | 1283 |
  def __enter__(self):
    """Context manager interface. Returns the cache itself."""
    return self
| 1234 | 1287 |
  def __exit__(self, _exc_type, _exec_value, _traceback):
    """Context manager interface. Never suppresses exceptions."""
    return False
| 1238 | 1291 |
  @property
  def added(self):
    """Sizes of items added to the cache, as a copy (for profiling)."""
    return self._added[:]
| 1242 | 1295 |
  @property
  def evicted(self):
    """Items evicted from the cache, as a copy (for profiling)."""
    return self._evicted[:]
| 1246 | 1299 |
  @property
  def used(self):
    """Sizes of items served via getfileobj(), as a copy (for profiling)."""
    return self._used[:]
| 1303 | |
  @property
  def initial_number_items(self):
    """Number of items present when the cache was loaded."""
    return self._initial_number_items
| 1250 | 1307 |
  @property
  def initial_size(self):
    """Total size, in bytes, of the cache content when it was loaded."""
    return self._initial_size
| 1254 | 1311 |
| 1255 @property | |
| 1256 def linked(self): | |
| 1257 return self._linked[:] | |
| 1258 | |
  def cached_set(self):
    """Returns a set of all cached digests (always a new object)."""
    raise NotImplementedError()
| 1262 | 1315 |
  def cleanup(self):
    """Deletes any corrupted item from the cache and trims it if necessary."""
    raise NotImplementedError()
| 1266 | 1319 |
  def touch(self, digest, size):
    """Ensures item is not corrupted and updates its LRU position.

    Subclasses may additionally protect touched items from eviction.

    Arguments:
      digest: hash digest of item to check.
      size: expected size of this item.

    Returns:
      True if item is in cache and not corrupted.
    """
    raise NotImplementedError()
| 1278 | 1331 |
  def evict(self, digest):
    """Removes item from cache if it's there."""
    raise NotImplementedError()
| 1282 | 1335 |
  def getfileobj(self, digest):
    """Returns a readable file like object with the content of the item.

    If the file exists on the file system it will have a .name attribute with
    an absolute path to the file.

    It is the caller's responsibility to close the returned object.

    Raises:
      CacheMiss: if the item is not in the cache.
    """
    raise NotImplementedError()
| 1286 | 1343 |
  def write(self, digest, content):
    """Reads data from |content| generator and stores it in cache.

    Returns digest to simplify chaining.
    """
    raise NotImplementedError()
| 1293 | 1350 |
| 1294 def hardlink(self, digest, dest, file_mode): | |
| 1295 """Ensures file at |dest| has same content as cached |digest|. | |
| 1296 | |
| 1297 If file_mode is provided, it is used to set the executable bit if | |
| 1298 applicable. | |
| 1299 """ | |
| 1300 raise NotImplementedError() | |
| 1301 | |
| 1302 | 1351 |
| 1303 class MemoryCache(LocalCache): | 1352 class MemoryCache(LocalCache): |
| 1304 """LocalCache implementation that stores everything in memory.""" | 1353 """LocalCache implementation that stores everything in memory.""" |
| 1305 | 1354 |
| 1306 def __init__(self, file_mode_mask=0500): | 1355 def __init__(self, file_mode_mask=0500): |
| 1307 """Args: | 1356 """Args: |
| 1308 file_mode_mask: bit mask to AND file mode with. Default value will make | 1357 file_mode_mask: bit mask to AND file mode with. Default value will make |
| 1309 all mapped files to be read only. | 1358 all mapped files to be read only. |
| 1310 """ | 1359 """ |
| 1311 super(MemoryCache, self).__init__() | 1360 super(MemoryCache, self).__init__() |
| (...skipping 14 matching lines...) Expand all Loading... | |
  def touch(self, digest, size):
    """Returns True if the item is in the cache.

    The |size| argument is not verified for in-memory content.
    """
    with self._lock:
      return digest in self._contents
| 1329 | 1378 |
| 1330 def evict(self, digest): | 1379 def evict(self, digest): |
| 1331 with self._lock: | 1380 with self._lock: |
| 1332 v = self._contents.pop(digest, None) | 1381 v = self._contents.pop(digest, None) |
| 1333 if v is not None: | 1382 if v is not None: |
| 1334 self._evicted.add(v) | 1383 self._evicted.add(v) |
| 1335 | 1384 |
| 1336 def read(self, digest): | 1385 def getfileobj(self, digest): |
| 1337 with self._lock: | 1386 with self._lock: |
| 1338 try: | 1387 try: |
| 1339 return self._contents[digest] | 1388 d = self._contents[digest] |
| 1389 self._used.append(len(d)) | |
| 1390 return StringIO.StringIO(d) | |
| 1340 except KeyError: | 1391 except KeyError: |
| 1341 raise CacheMiss(digest) | 1392 raise CacheMiss(digest) |
| 1342 | 1393 |
| 1343 def write(self, digest, content): | 1394 def write(self, digest, content): |
| 1344 # Assemble whole stream before taking the lock. | 1395 # Assemble whole stream before taking the lock. |
| 1345 data = ''.join(content) | 1396 data = ''.join(content) |
| 1346 with self._lock: | 1397 with self._lock: |
| 1347 self._contents[digest] = data | 1398 self._contents[digest] = data |
| 1348 self._added.append(len(data)) | 1399 self._added.append(len(data)) |
| 1349 return digest | 1400 return digest |
| 1350 | 1401 |
| 1351 def hardlink(self, digest, dest, file_mode): | |
| 1352 """Since data is kept in memory, there is no filenode to hardlink.""" | |
| 1353 data = self.read(digest) | |
| 1354 file_write(dest, [data]) | |
| 1355 if file_mode is not None: | |
| 1356 fs.chmod(dest, file_mode & self._file_mode_mask) | |
| 1357 with self._lock: | |
| 1358 self._linked.append(len(data)) | |
| 1359 | |
| 1360 | 1402 |
| 1361 class CachePolicies(object): | 1403 class CachePolicies(object): |
| 1362 def __init__(self, max_cache_size, min_free_space, max_items): | 1404 def __init__(self, max_cache_size, min_free_space, max_items): |
| 1363 """ | 1405 """ |
| 1364 Arguments: | 1406 Arguments: |
| 1365 - max_cache_size: Trim if the cache gets larger than this value. If 0, the | 1407 - max_cache_size: Trim if the cache gets larger than this value. If 0, the |
| 1366 cache is effectively a leak. | 1408 cache is effectively a leak. |
| 1367 - min_free_space: Trim if disk free space becomes lower than this value. If | 1409 - min_free_space: Trim if disk free space becomes lower than this value. If |
| 1368 0, it unconditionally fill the disk. | 1410 0, it unconditionally fill the disk. |
| 1369 - max_items: Maximum number of items to keep in the cache. If 0, do not | 1411 - max_items: Maximum number of items to keep in the cache. If 0, do not |
| (...skipping 111 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... | |
| 1481 self._protected.add(digest) | 1523 self._protected.add(digest) |
| 1482 return True | 1524 return True |
| 1483 | 1525 |
  def evict(self, digest):
    """Removes the item's LRU bookkeeping and deletes its backing file."""
    with self._lock:
      # Do not check for 'digest in self._protected' since it could be because
      # the object is corrupted.
      self._lru.pop(digest)
      self._delete_file(digest, UNKNOWN_FILE_SIZE)
| 1490 | 1532 |
| 1491 def read(self, digest): | 1533 def getfileobj(self, digest): |
| 1492 try: | 1534 try: |
| 1493 with fs.open(self._path(digest), 'rb') as f: | 1535 f = fs.open(self._path(digest), 'rb') |
| 1494 return f.read() | 1536 with self._lock: |
| 1537 self._used.append(self._lru[digest]) | |
| 1538 return f | |
| 1495 except IOError: | 1539 except IOError: |
| 1496 raise CacheMiss(digest) | 1540 raise CacheMiss(digest) |
| 1497 | 1541 |
| 1498 def write(self, digest, content): | 1542 def write(self, digest, content): |
| 1499 assert content is not None | 1543 assert content is not None |
| 1500 with self._lock: | 1544 with self._lock: |
| 1501 self._protected.add(digest) | 1545 self._protected.add(digest) |
| 1502 path = self._path(digest) | 1546 path = self._path(digest) |
| 1503 # A stale broken file may remain. It is possible for the file to have write | 1547 # A stale broken file may remain. It is possible for the file to have write |
| 1504 # access bit removed which would cause the file_write() call to fail to open | 1548 # access bit removed which would cause the file_write() call to fail to open |
| (...skipping 10 matching lines...) Expand all Loading... | |
| 1515 file_path.try_remove(path) | 1559 file_path.try_remove(path) |
| 1516 raise | 1560 raise |
| 1517 # Make the file read-only in the cache. This has a few side-effects since | 1561 # Make the file read-only in the cache. This has a few side-effects since |
| 1518 # the file node is modified, so every directory entries to this file becomes | 1562 # the file node is modified, so every directory entries to this file becomes |
| 1519 # read-only. It's fine here because it is a new file. | 1563 # read-only. It's fine here because it is a new file. |
| 1520 file_path.set_read_only(path, True) | 1564 file_path.set_read_only(path, True) |
| 1521 with self._lock: | 1565 with self._lock: |
| 1522 self._add(digest, size) | 1566 self._add(digest, size) |
| 1523 return digest | 1567 return digest |
| 1524 | 1568 |
| 1525 def hardlink(self, digest, dest, file_mode): | |
| 1526 """Hardlinks the file to |dest|. | |
| 1527 | |
| 1528 Note that the file permission bits are on the file node, not the directory | |
| 1529 entry, so changing the access bit on any of the directory entries for the | |
| 1530 file node will affect them all. | |
| 1531 """ | |
| 1532 path = self._path(digest) | |
| 1533 if not file_path.link_file(dest, path, file_path.HARDLINK_WITH_FALLBACK): | |
| 1534 # Report to the server that it failed with more details. We'll want to | |
| 1535 # squash them all. | |
| 1536 on_error.report('Failed to hardlink\n%s -> %s' % (path, dest)) | |
| 1537 | |
| 1538 if file_mode is not None: | |
| 1539 # Ignores all other bits. | |
| 1540 fs.chmod(dest, file_mode & 0500) | |
| 1541 with self._lock: | |
| 1542 self._linked.append(self._lru[digest]) | |
| 1543 | |
| 1544 def _load(self): | 1569 def _load(self): |
| 1545 """Loads state of the cache from json file.""" | 1570 """Loads state of the cache from json file.""" |
| 1546 self._lock.assert_locked() | 1571 self._lock.assert_locked() |
| 1547 | 1572 |
| 1548 if not os.path.isdir(self.cache_dir): | 1573 if not os.path.isdir(self.cache_dir): |
| 1549 fs.makedirs(self.cache_dir) | 1574 fs.makedirs(self.cache_dir) |
| 1550 else: | 1575 else: |
| 1551 # Make sure the cache is read-only. | 1576 # Make sure the cache is read-only. |
| 1552 # TODO(maruel): Calculate the cost and optimize the performance | 1577 # TODO(maruel): Calculate the cost and optimize the performance |
| 1553 # accordingly. | 1578 # accordingly. |
| (...skipping 210 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... | |
| 1764 pending[h] = isolated_file | 1789 pending[h] = isolated_file |
| 1765 fetch_queue.add(h, priority=threading_utils.PRIORITY_HIGH) | 1790 fetch_queue.add(h, priority=threading_utils.PRIORITY_HIGH) |
| 1766 | 1791 |
| 1767 # Start fetching root *.isolated file (single file, not the whole bundle). | 1792 # Start fetching root *.isolated file (single file, not the whole bundle). |
| 1768 retrieve_async(self.root) | 1793 retrieve_async(self.root) |
| 1769 | 1794 |
| 1770 while pending: | 1795 while pending: |
| 1771 # Wait until some *.isolated file is fetched, parse it. | 1796 # Wait until some *.isolated file is fetched, parse it. |
| 1772 item_hash = fetch_queue.wait(pending) | 1797 item_hash = fetch_queue.wait(pending) |
| 1773 item = pending.pop(item_hash) | 1798 item = pending.pop(item_hash) |
| 1774 item.load(fetch_queue.cache.read(item_hash)) | 1799 item.load(fetch_queue.cache.getfileobj(item_hash).read()) |
| 1775 | 1800 |
| 1776 # Start fetching included *.isolated files. | 1801 # Start fetching included *.isolated files. |
| 1777 for new_child in item.children: | 1802 for new_child in item.children: |
| 1778 retrieve_async(new_child) | 1803 retrieve_async(new_child) |
| 1779 | 1804 |
| 1780 # Always fetch *.isolated files in traversal order, waiting if necessary | 1805 # Always fetch *.isolated files in traversal order, waiting if necessary |
| 1781 # until next to-be-processed node loads. "Waiting" is done by yielding | 1806 # until next to-be-processed node loads. "Waiting" is done by yielding |
| 1782 # back to the outer loop, that waits until some *.isolated is loaded. | 1807 # back to the outer loop, that waits until some *.isolated is loaded. |
| 1783 for node in isolated_format.walk_includes(self.root): | 1808 for node in isolated_format.walk_includes(self.root): |
| 1784 if node not in processed: | 1809 if node not in processed: |
| (...skipping 17 matching lines...) Expand all Loading... | |
| 1802 """Starts fetching files from |isolated| that are not yet being fetched. | 1827 """Starts fetching files from |isolated| that are not yet being fetched. |
| 1803 | 1828 |
| 1804 Modifies self.files. | 1829 Modifies self.files. |
| 1805 """ | 1830 """ |
| 1806 logging.debug('fetch_files(%s)', isolated.obj_hash) | 1831 logging.debug('fetch_files(%s)', isolated.obj_hash) |
| 1807 for filepath, properties in isolated.data.get('files', {}).iteritems(): | 1832 for filepath, properties in isolated.data.get('files', {}).iteritems(): |
| 1808 # Root isolated has priority on the files being mapped. In particular, | 1833 # Root isolated has priority on the files being mapped. In particular, |
| 1809 # overridden files must not be fetched. | 1834 # overridden files must not be fetched. |
| 1810 if filepath not in self.files: | 1835 if filepath not in self.files: |
| 1811 self.files[filepath] = properties | 1836 self.files[filepath] = properties |
| 1837 | |
| 1838 # Make sure if the isolated is read only, the mode doesn't have write | |
| 1839 # bits. | |
| 1840 if 'm' in properties and self.read_only: | |
| 1841 properties['m'] &= ~(stat.S_IWUSR | stat.S_IWGRP | stat.S_IWOTH) | |
| 1842 | |
| 1843 # Preemptively request hashed files. | |
| 1812 if 'h' in properties: | 1844 if 'h' in properties: |
| 1813 # Preemptively request files. | |
| 1814 logging.debug('fetching %s', filepath) | 1845 logging.debug('fetching %s', filepath) |
| 1815 fetch_queue.add( | 1846 fetch_queue.add( |
| 1816 properties['h'], properties['s'], threading_utils.PRIORITY_MED) | 1847 properties['h'], properties['s'], threading_utils.PRIORITY_MED) |
| 1817 | 1848 |
| 1818 def _update_self(self, node): | 1849 def _update_self(self, node): |
| 1819 """Extracts bundle global parameters from loaded *.isolated file. | 1850 """Extracts bundle global parameters from loaded *.isolated file. |
| 1820 | 1851 |
| 1821 Will be called with each loaded *.isolated file in order of traversal of | 1852 Will be called with each loaded *.isolated file in order of traversal of |
| 1822 isolated include graph (see isolated_format.walk_includes). | 1853 isolated include graph (see isolated_format.walk_includes). |
| 1823 """ | 1854 """ |
| (...skipping 141 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... | |
| 1965 logging.info('Retrieving remaining files (%d of them)...', | 1996 logging.info('Retrieving remaining files (%d of them)...', |
| 1966 fetch_queue.pending_count) | 1997 fetch_queue.pending_count) |
| 1967 last_update = time.time() | 1998 last_update = time.time() |
| 1968 with threading_utils.DeadlockDetector(DEADLOCK_TIMEOUT) as detector: | 1999 with threading_utils.DeadlockDetector(DEADLOCK_TIMEOUT) as detector: |
| 1969 while remaining: | 2000 while remaining: |
| 1970 detector.ping() | 2001 detector.ping() |
| 1971 | 2002 |
| 1972 # Wait for any item to finish fetching to cache. | 2003 # Wait for any item to finish fetching to cache. |
| 1973 digest = fetch_queue.wait(remaining) | 2004 digest = fetch_queue.wait(remaining) |
| 1974 | 2005 |
| 1975 # Link corresponding files to a fetched item in cache. | 2006 # Create the files in the destination using item in cache as the |
| 2007 # source. | |
| 1976 for filepath, props in remaining.pop(digest): | 2008 for filepath, props in remaining.pop(digest): |
| 1977 dest = os.path.join(outdir, filepath) | 2009 fullpath = os.path.join(outdir, filepath) |
| 1978 if os.path.exists(dest): | 2010 srcfileobj = cache.getfileobj(digest) |
| 1979 raise AlreadyExists('File %s already exists' % dest) | 2011 |
| 1980 cache.hardlink(digest, dest, props.get('m')) | 2012 isarchive = props.get('a') |
| 2013 if isarchive: | |
| 2014 basedir = os.path.dirname(fullpath) | |
| 2015 | |
| 2016 extractor = None | |
| 2017 if isarchive == 'ar': | |
|
nodir
2016/06/20 15:54:35
perhaps inverse the condition and remove `extracto
mithro
2016/06/21 06:42:33
extractor = None is actually not needed. Removed.
nodir
2016/06/21 22:20:04
the goal was to reduce nesting. With condition inv
mithro
2016/06/22 12:03:10
I refactored this code a little.
| |
| 2018 extractor = arfile.ArFileReader(srcfileobj, fullparse=False) | |
| 2019 else: | |
| 2020 raise isolated_format.IsolatedError( | |
| 2021 'Unknown archive format %r', isarchive) | |
| 2022 | |
| 2023 for ai, ifd in extractor: | |
| 2024 fp = os.path.normpath(os.path.join(basedir, ai.name)) | |
| 2025 file_path.ensure_tree(os.path.dirname(fp)) | |
| 2026 putfile(ifd, fp, ai.mode, ai.size) | |
| 2027 | |
| 2028 else: | |
| 2029 file_mode = props.get('m') | |
| 2030 if file_mode: | |
| 2031 # Ignore all bits apart from the user | |
| 2032 file_mode &= 0700 | |
| 2033 putfile(srcfileobj, fullpath, file_mode) | |
| 1981 | 2034 |
| 1982 # Report progress. | 2035 # Report progress. |
| 1983 duration = time.time() - last_update | 2036 duration = time.time() - last_update |
| 1984 if duration > DELAY_BETWEEN_UPDATES_IN_SECS: | 2037 if duration > DELAY_BETWEEN_UPDATES_IN_SECS: |
| 1985 msg = '%d files remaining...' % len(remaining) | 2038 msg = '%d files remaining...' % len(remaining) |
| 1986 print msg | 2039 print msg |
| 1987 logging.info(msg) | 2040 logging.info(msg) |
| 1988 last_update = time.time() | 2041 last_update = time.time() |
| 1989 | 2042 |
| 1990 # Cache could evict some items we just tried to fetch, it's a fatal error. | 2043 # Cache could evict some items we just tried to fetch, it's a fatal error. |
| (...skipping 316 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... | |
| 2307 dispatcher = subcommand.CommandDispatcher(__name__) | 2360 dispatcher = subcommand.CommandDispatcher(__name__) |
| 2308 return dispatcher.execute(OptionParserIsolateServer(), args) | 2361 return dispatcher.execute(OptionParserIsolateServer(), args) |
| 2309 | 2362 |
| 2310 | 2363 |
if __name__ == '__main__':
  # One-time process setup (OS error reporting, stream encoding, output
  # buffering, console colors) before dispatching the command line to main().
  subprocess42.inhibit_os_error_reporting()
  fix_encoding.fix_encoding()
  tools.disable_buffering()
  colorama.init()
  sys.exit(main(sys.argv[1:]))
| OLD | NEW |