Chromium Code Reviews
chromiumcodereview-hr@appspot.gserviceaccount.com (chromiumcodereview-hr) | Please choose your nickname with Settings | Help | Chromium Project | Gerrit Changes | Sign out
(551)

Side by Side Diff: client/isolateserver.py

Issue 2060983006: luci-py/isolateserver.py: Add archive support when downloading. (Closed) Base URL: https://github.com/luci/luci-py.git@master
Patch Set: Rebase Created 4 years, 6 months ago
Use n/p to move between diff chunks; N/P to move between comments. Draft comments are only viewable by you.
Jump to:
View unified diff | Download patch
OLDNEW
1 #!/usr/bin/env python 1 #!/usr/bin/env python
2 # Copyright 2013 The LUCI Authors. All rights reserved. 2 # Copyright 2013 The LUCI Authors. All rights reserved.
3 # Use of this source code is governed under the Apache License, Version 2.0 3 # Use of this source code is governed under the Apache License, Version 2.0
4 # that can be found in the LICENSE file. 4 # that can be found in the LICENSE file.
5 5
6 """Archives a set of files or directories to an Isolate Server.""" 6 """Archives a set of files or directories to an Isolate Server."""
7 7
8 __version__ = '0.4.9' 8 __version__ = '0.4.9'
9 9
10 import base64 10 import base64
11 import errno
11 import functools 12 import functools
12 import errno 13 import io
13 import logging 14 import logging
14 import optparse 15 import optparse
15 import os 16 import os
16 import re 17 import re
17 import signal 18 import signal
19 import stat
18 import sys 20 import sys
19 import tempfile 21 import tempfile
20 import threading 22 import threading
21 import time 23 import time
22 import types 24 import types
23 import zlib 25 import zlib
24 26
25 from third_party import colorama 27 from third_party import colorama
26 from third_party.depot_tools import fix_encoding 28 from third_party.depot_tools import fix_encoding
27 from third_party.depot_tools import subcommand 29 from third_party.depot_tools import subcommand
28 30
31 from libs import arfile
29 from utils import file_path 32 from utils import file_path
30 from utils import fs 33 from utils import fs
31 from utils import logging_utils 34 from utils import logging_utils
32 from utils import lru 35 from utils import lru
33 from utils import net 36 from utils import net
34 from utils import on_error 37 from utils import on_error
35 from utils import subprocess42 38 from utils import subprocess42
36 from utils import threading_utils 39 from utils import threading_utils
37 from utils import tools 40 from utils import tools
38 41
(...skipping 103 matching lines...) Expand 10 before | Expand all | Expand 10 after
142 """ 145 """
143 file_path.ensure_tree(os.path.dirname(path)) 146 file_path.ensure_tree(os.path.dirname(path))
144 total = 0 147 total = 0
145 with fs.open(path, 'wb') as f: 148 with fs.open(path, 'wb') as f:
146 for d in content_generator: 149 for d in content_generator:
147 total += len(d) 150 total += len(d)
148 f.write(d) 151 f.write(d)
149 return total 152 return total
150 153
151 154
def fileobj_path(fileobj):
  """Returns the file system path for a file like object, or None.

  The returned path is guaranteed to exist and can be passed to file system
  operations like copy.
  """
  name = getattr(fileobj, 'name', None)
  if name is None:
    return None

  # If the file like object was created using something like open("test.txt")
  # name will end up being a str (such as a function outside our control, like
  # the standard library). We want all our paths to be unicode objects, so we
  # decode it.
  if not isinstance(name, unicode):
    name = name.decode(sys.getfilesystemencoding())

  # The named file may no longer exist (or the .name attribute may not refer
  # to a real file at all); in that case there is no usable path.
  if fs.exists(name):
    return name
  return None
174
175
# TODO(tansell): Replace fileobj_copy with shutil.copyfileobj once proper file
# wrappers have been created.
def fileobj_copy(dstfileobj, srcfileobj, size=-1):
  """Copy data from srcfileobj to dstfileobj.

  Providing size means exactly that amount of data will be copied (if there
  isn't enough data, an IOError exception is thrown). Otherwise all data until
  the EOF marker will be copied.

  Arguments:
    dstfileobj: writable file like object to copy into.
    srcfileobj: readable file like object to copy from.
    size: exact number of bytes to copy, or -1 to copy until EOF.
  """
  written = 0
  while written != size:
    readsize = NET_IO_FILE_CHUNK
    if size > 0:
      # Never read past the requested total.
      readsize = min(readsize, size - written)
    data = srcfileobj.read(readsize)
    if not data:
      if size == -1:
        break
      raise IOError('partial file, got %s, wanted %s' % (written, size))
    dstfileobj.write(data)
    written += len(data)
198
199
def putfile(srcfileobj, dstpath, file_mode=None, size=-1):
  """Put srcfileobj at the given dstpath with given mode.

  The function aims to do this as efficiently as possible while still allowing
  any possible file like object be given.

  Arguments:
    srcfileobj: readable file like object with the content to store.
    dstpath: destination file path.
    file_mode: mode bits to set on dstpath, or None to keep the default.
    size: exact number of bytes to write, or -1 to write until EOF.
  """
  srcpath = fileobj_path(srcfileobj)
  if srcpath and size == -1:
    # A file is considered read-only when none of the write bits are set
    # (file_mode of None keeps the default, which is safe to share).
    # BUG FIX: the original expression was missing the 'not', so writable
    # files were hardlinked (sharing the inode) and read-only files copied —
    # the exact opposite of the intent documented below.
    readonly = file_mode is None or not (
        file_mode & (stat.S_IWUSR | stat.S_IWGRP | stat.S_IWOTH))
    if readonly:
      # We can link the file: nobody can modify it through the destination.
      file_path.link_file(dstpath, srcpath, file_path.HARDLINK_WITH_FALLBACK)
    else:
      # We must copy the file so writes to dstpath do not leak back into the
      # shared source file.
      file_path.link_file(dstpath, srcpath, file_path.COPY)
  else:
    # Need to write out the file chunk by chunk.
    with fs.open(dstpath, 'wb') as dstfileobj:
      fileobj_copy(dstfileobj, srcfileobj, size)

  # file_mode of 0 is actually valid, so need explicit check.
  if file_mode is not None:
    fs.chmod(dstpath, file_mode)
224
225
def zip_compress(content_generator, level=7):
  """Reads chunks from |content_generator| and yields zip compressed chunks."""
  deflater = zlib.compressobj(level)
  for piece in content_generator:
    out = deflater.compress(piece)
    if out:
      yield out
  # Emit whatever the compressor still buffers once the input is exhausted.
  trailing = deflater.flush(zlib.Z_FINISH)
  if trailing:
    yield trailing
(...skipping 1054 matching lines...) Expand 10 before | Expand all | Expand 10 after
1216 """ 1290 """
1217 cache_dir = None 1291 cache_dir = None
1218 1292
1219 def __init__(self): 1293 def __init__(self):
1220 self._lock = threading_utils.LockWithAssert() 1294 self._lock = threading_utils.LockWithAssert()
1221 # Profiling values. 1295 # Profiling values.
1222 self._added = [] 1296 self._added = []
1223 self._initial_number_items = 0 1297 self._initial_number_items = 0
1224 self._initial_size = 0 1298 self._initial_size = 0
1225 self._evicted = [] 1299 self._evicted = []
1226 self._linked = [] 1300 self._used = []
1227 1301
  def __contains__(self, digest):
    """Returns True if an item with |digest| is present in the cache."""
    raise NotImplementedError()
1230 1304
  def __enter__(self):
    """Context manager interface. Returns the cache instance itself."""
    return self
1234 1308
  def __exit__(self, _exc_type, _exec_value, _traceback):
    """Context manager interface. Returns False, so never swallows exceptions."""
    return False
1238 1312
1239 @property 1313 @property
1240 def added(self): 1314 def added(self):
1241 return self._added[:] 1315 return self._added[:]
1242 1316
1243 @property 1317 @property
1244 def evicted(self): 1318 def evicted(self):
1245 return self._evicted[:] 1319 return self._evicted[:]
1246 1320
1247 @property 1321 @property
1322 def used(self):
1323 return self._used[:]
1324
  @property
  def initial_number_items(self):
    """Number of items present in the cache when it was initialized."""
    return self._initial_number_items
1250 1328
  @property
  def initial_size(self):
    """Total size of the cache content when it was initialized."""
    return self._initial_size
1254 1332
1255 @property
1256 def linked(self):
1257 return self._linked[:]
1258
  def cached_set(self):
    """Returns a set of all cached digests (always a new object).

    Subclasses must override.
    """
    raise NotImplementedError()
1262 1336
  def cleanup(self):
    """Deletes any corrupted item from the cache and trims it if necessary.

    Subclasses must override.
    """
    raise NotImplementedError()
1266 1340
  def touch(self, digest, size):
    """Ensures item is not corrupted and updates its LRU position.

    Arguments:
      digest: hash digest of item to check.
      size: expected size of this item.

    Returns:
      True if item is in cache and not corrupted.

    Subclasses must override.
    """
    raise NotImplementedError()
1278 1352
  def evict(self, digest):
    """Removes item from cache if it's there. Subclasses must override."""
    raise NotImplementedError()
1282 1356
  def getfileobj(self, digest):
    """Returns a readable file like object.

    If file exists on the file system it will have a .name attribute with an
    absolute path to the file.

    Raises:
      CacheMiss if the item is not in the cache (see concrete subclasses).
    """
    raise NotImplementedError()
1286 1364
  def write(self, digest, content):
    """Reads data from |content| generator and stores it in cache.

    Arguments:
      digest: key under which the data is stored.
      content: iterable of str chunks making up the item's data.

    Returns digest to simplify chaining.
    """
    raise NotImplementedError()
1293 1371
1294 def hardlink(self, digest, dest, file_mode):
1295 """Ensures file at |dest| has same content as cached |digest|.
1296
1297 If file_mode is provided, it is used to set the executable bit if
1298 applicable.
1299 """
1300 raise NotImplementedError()
1301
1302 1372
1303 class MemoryCache(LocalCache): 1373 class MemoryCache(LocalCache):
1304 """LocalCache implementation that stores everything in memory.""" 1374 """LocalCache implementation that stores everything in memory."""
1305 1375
1306 def __init__(self, file_mode_mask=0500): 1376 def __init__(self, file_mode_mask=0500):
1307 """Args: 1377 """Args:
1308 file_mode_mask: bit mask to AND file mode with. Default value will make 1378 file_mode_mask: bit mask to AND file mode with. Default value will make
1309 all mapped files to be read only. 1379 all mapped files to be read only.
1310 """ 1380 """
1311 super(MemoryCache, self).__init__() 1381 super(MemoryCache, self).__init__()
(...skipping 14 matching lines...) Expand all
  def touch(self, digest, size):
    # |size| is deliberately unused: presence of the digest in the in-memory
    # dict is the only check performed.
    with self._lock:
      return digest in self._contents
1329 1399
1330 def evict(self, digest): 1400 def evict(self, digest):
1331 with self._lock: 1401 with self._lock:
1332 v = self._contents.pop(digest, None) 1402 v = self._contents.pop(digest, None)
1333 if v is not None: 1403 if v is not None:
1334 self._evicted.add(v) 1404 self._evicted.add(v)
1335 1405
1336 def read(self, digest): 1406 def getfileobj(self, digest):
1337 with self._lock: 1407 with self._lock:
1338 try: 1408 try:
1339 return self._contents[digest] 1409 d = self._contents[digest]
1340 except KeyError: 1410 except KeyError:
1341 raise CacheMiss(digest) 1411 raise CacheMiss(digest)
1412 self._used.append(len(d))
1413 return io.BytesIO(d)
1342 1414
1343 def write(self, digest, content): 1415 def write(self, digest, content):
1344 # Assemble whole stream before taking the lock. 1416 # Assemble whole stream before taking the lock.
1345 data = ''.join(content) 1417 data = ''.join(content)
1346 with self._lock: 1418 with self._lock:
1347 self._contents[digest] = data 1419 self._contents[digest] = data
1348 self._added.append(len(data)) 1420 self._added.append(len(data))
1349 return digest 1421 return digest
1350 1422
1351 def hardlink(self, digest, dest, file_mode):
1352 """Since data is kept in memory, there is no filenode to hardlink."""
1353 data = self.read(digest)
1354 file_write(dest, [data])
1355 if file_mode is not None:
1356 fs.chmod(dest, file_mode & self._file_mode_mask)
1357 with self._lock:
1358 self._linked.append(len(data))
1359
1360 1423
1361 class CachePolicies(object): 1424 class CachePolicies(object):
1362 def __init__(self, max_cache_size, min_free_space, max_items): 1425 def __init__(self, max_cache_size, min_free_space, max_items):
1363 """ 1426 """
1364 Arguments: 1427 Arguments:
1365 - max_cache_size: Trim if the cache gets larger than this value. If 0, the 1428 - max_cache_size: Trim if the cache gets larger than this value. If 0, the
1366 cache is effectively a leak. 1429 cache is effectively a leak.
1367 - min_free_space: Trim if disk free space becomes lower than this value. If 1430 - min_free_space: Trim if disk free space becomes lower than this value. If
1368 0, it unconditionally fill the disk. 1431 0, it unconditionally fill the disk.
1369 - max_items: Maximum number of items to keep in the cache. If 0, do not 1432 - max_items: Maximum number of items to keep in the cache. If 0, do not
(...skipping 111 matching lines...) Expand 10 before | Expand all | Expand 10 after
1481 self._protected.add(digest) 1544 self._protected.add(digest)
1482 return True 1545 return True
1483 1546
  def evict(self, digest):
    """Removes the item from the LRU bookkeeping and deletes its file."""
    with self._lock:
      # Do not check for 'digest in self._protected' since it could be because
      # the object is corrupted.
      self._lru.pop(digest)
      self._delete_file(digest, UNKNOWN_FILE_SIZE)
1490 1553
1491 def read(self, digest): 1554 def getfileobj(self, digest):
1492 try: 1555 try:
1493 with fs.open(self._path(digest), 'rb') as f: 1556 f = fs.open(self._path(digest), 'rb')
1494 return f.read() 1557 with self._lock:
1558 self._used.append(self._lru[digest])
1559 return f
1495 except IOError: 1560 except IOError:
1496 raise CacheMiss(digest) 1561 raise CacheMiss(digest)
1497 1562
1498 def write(self, digest, content): 1563 def write(self, digest, content):
1499 assert content is not None 1564 assert content is not None
1500 with self._lock: 1565 with self._lock:
1501 self._protected.add(digest) 1566 self._protected.add(digest)
1502 path = self._path(digest) 1567 path = self._path(digest)
1503 # A stale broken file may remain. It is possible for the file to have write 1568 # A stale broken file may remain. It is possible for the file to have write
1504 # access bit removed which would cause the file_write() call to fail to open 1569 # access bit removed which would cause the file_write() call to fail to open
(...skipping 10 matching lines...) Expand all
1515 file_path.try_remove(path) 1580 file_path.try_remove(path)
1516 raise 1581 raise
1517 # Make the file read-only in the cache. This has a few side-effects since 1582 # Make the file read-only in the cache. This has a few side-effects since
1518 # the file node is modified, so every directory entries to this file becomes 1583 # the file node is modified, so every directory entries to this file becomes
1519 # read-only. It's fine here because it is a new file. 1584 # read-only. It's fine here because it is a new file.
1520 file_path.set_read_only(path, True) 1585 file_path.set_read_only(path, True)
1521 with self._lock: 1586 with self._lock:
1522 self._add(digest, size) 1587 self._add(digest, size)
1523 return digest 1588 return digest
1524 1589
1525 def hardlink(self, digest, dest, file_mode):
1526 """Hardlinks the file to |dest|.
1527
1528 Note that the file permission bits are on the file node, not the directory
1529 entry, so changing the access bit on any of the directory entries for the
1530 file node will affect them all.
1531 """
1532 path = self._path(digest)
1533 if not file_path.link_file(dest, path, file_path.HARDLINK_WITH_FALLBACK):
1534 # Report to the server that it failed with more details. We'll want to
1535 # squash them all.
1536 on_error.report('Failed to hardlink\n%s -> %s' % (path, dest))
1537
1538 if file_mode is not None:
1539 # Ignores all other bits.
1540 fs.chmod(dest, file_mode & 0500)
1541 with self._lock:
1542 self._linked.append(self._lru[digest])
1543
1544 def _load(self): 1590 def _load(self):
1545 """Loads state of the cache from json file.""" 1591 """Loads state of the cache from json file."""
1546 self._lock.assert_locked() 1592 self._lock.assert_locked()
1547 1593
1548 if not os.path.isdir(self.cache_dir): 1594 if not os.path.isdir(self.cache_dir):
1549 fs.makedirs(self.cache_dir) 1595 fs.makedirs(self.cache_dir)
1550 else: 1596 else:
1551 # Make sure the cache is read-only. 1597 # Make sure the cache is read-only.
1552 # TODO(maruel): Calculate the cost and optimize the performance 1598 # TODO(maruel): Calculate the cost and optimize the performance
1553 # accordingly. 1599 # accordingly.
(...skipping 210 matching lines...) Expand 10 before | Expand all | Expand 10 after
1764 pending[h] = isolated_file 1810 pending[h] = isolated_file
1765 fetch_queue.add(h, priority=threading_utils.PRIORITY_HIGH) 1811 fetch_queue.add(h, priority=threading_utils.PRIORITY_HIGH)
1766 1812
1767 # Start fetching root *.isolated file (single file, not the whole bundle). 1813 # Start fetching root *.isolated file (single file, not the whole bundle).
1768 retrieve_async(self.root) 1814 retrieve_async(self.root)
1769 1815
1770 while pending: 1816 while pending:
1771 # Wait until some *.isolated file is fetched, parse it. 1817 # Wait until some *.isolated file is fetched, parse it.
1772 item_hash = fetch_queue.wait(pending) 1818 item_hash = fetch_queue.wait(pending)
1773 item = pending.pop(item_hash) 1819 item = pending.pop(item_hash)
1774 item.load(fetch_queue.cache.read(item_hash)) 1820 with fetch_queue.cache.getfileobj(item_hash) as f:
1821 item.load(f.read())
1775 1822
1776 # Start fetching included *.isolated files. 1823 # Start fetching included *.isolated files.
1777 for new_child in item.children: 1824 for new_child in item.children:
1778 retrieve_async(new_child) 1825 retrieve_async(new_child)
1779 1826
1780 # Always fetch *.isolated files in traversal order, waiting if necessary 1827 # Always fetch *.isolated files in traversal order, waiting if necessary
1781 # until next to-be-processed node loads. "Waiting" is done by yielding 1828 # until next to-be-processed node loads. "Waiting" is done by yielding
1782 # back to the outer loop, that waits until some *.isolated is loaded. 1829 # back to the outer loop, that waits until some *.isolated is loaded.
1783 for node in isolated_format.walk_includes(self.root): 1830 for node in isolated_format.walk_includes(self.root):
1784 if node not in processed: 1831 if node not in processed:
(...skipping 17 matching lines...) Expand all
1802 """Starts fetching files from |isolated| that are not yet being fetched. 1849 """Starts fetching files from |isolated| that are not yet being fetched.
1803 1850
1804 Modifies self.files. 1851 Modifies self.files.
1805 """ 1852 """
1806 logging.debug('fetch_files(%s)', isolated.obj_hash) 1853 logging.debug('fetch_files(%s)', isolated.obj_hash)
1807 for filepath, properties in isolated.data.get('files', {}).iteritems(): 1854 for filepath, properties in isolated.data.get('files', {}).iteritems():
1808 # Root isolated has priority on the files being mapped. In particular, 1855 # Root isolated has priority on the files being mapped. In particular,
1809 # overridden files must not be fetched. 1856 # overridden files must not be fetched.
1810 if filepath not in self.files: 1857 if filepath not in self.files:
1811 self.files[filepath] = properties 1858 self.files[filepath] = properties
1859
1860 # Make sure if the isolated is read only, the mode doesn't have write
1861 # bits.
1862 if 'm' in properties and self.read_only:
1863 properties['m'] &= ~(stat.S_IWUSR | stat.S_IWGRP | stat.S_IWOTH)
1864
1865 # Preemptively request hashed files.
1812 if 'h' in properties: 1866 if 'h' in properties:
1813 # Preemptively request files.
1814 logging.debug('fetching %s', filepath) 1867 logging.debug('fetching %s', filepath)
1815 fetch_queue.add( 1868 fetch_queue.add(
1816 properties['h'], properties['s'], threading_utils.PRIORITY_MED) 1869 properties['h'], properties['s'], threading_utils.PRIORITY_MED)
1817 1870
1818 def _update_self(self, node): 1871 def _update_self(self, node):
1819 """Extracts bundle global parameters from loaded *.isolated file. 1872 """Extracts bundle global parameters from loaded *.isolated file.
1820 1873
1821 Will be called with each loaded *.isolated file in order of traversal of 1874 Will be called with each loaded *.isolated file in order of traversal of
1822 isolated include graph (see isolated_format.walk_includes). 1875 isolated include graph (see isolated_format.walk_includes).
1823 """ 1876 """
(...skipping 141 matching lines...) Expand 10 before | Expand all | Expand 10 after
1965 logging.info('Retrieving remaining files (%d of them)...', 2018 logging.info('Retrieving remaining files (%d of them)...',
1966 fetch_queue.pending_count) 2019 fetch_queue.pending_count)
1967 last_update = time.time() 2020 last_update = time.time()
1968 with threading_utils.DeadlockDetector(DEADLOCK_TIMEOUT) as detector: 2021 with threading_utils.DeadlockDetector(DEADLOCK_TIMEOUT) as detector:
1969 while remaining: 2022 while remaining:
1970 detector.ping() 2023 detector.ping()
1971 2024
1972 # Wait for any item to finish fetching to cache. 2025 # Wait for any item to finish fetching to cache.
1973 digest = fetch_queue.wait(remaining) 2026 digest = fetch_queue.wait(remaining)
1974 2027
1975 # Link corresponding files to a fetched item in cache. 2028 # Create the files in the destination using item in cache as the
2029 # source.
1976 for filepath, props in remaining.pop(digest): 2030 for filepath, props in remaining.pop(digest):
1977 dest = os.path.join(outdir, filepath) 2031 fullpath = os.path.join(outdir, filepath)
1978 if os.path.exists(dest): 2032
1979 raise AlreadyExists('File %s already exists' % dest) 2033 with cache.getfileobj(digest) as srcfileobj:
1980 cache.hardlink(digest, dest, props.get('m')) 2034 filetype = props.get('t', 'basic')
2035
2036 if filetype == 'basic':
2037 file_mode = props.get('m')
2038 if file_mode:
2039 # Ignore all bits apart from the user
2040 file_mode &= 0700
2041 putfile(srcfileobj, fullpath, file_mode)
2042
2043 elif filetype == 'smallfiles-archive':
2044 basedir = os.path.dirname(fullpath)
2045 extractor = arfile.ArFileReader(srcfileobj, fullparse=False)
2046 for ai, ifd in extractor:
2047 fp = os.path.normpath(os.path.join(basedir, ai.name))
2048 file_path.ensure_tree(os.path.dirname(fp))
2049 putfile(ifd, fp, ai.mode, ai.size)
2050
2051 else:
2052 raise isolated_format.IsolatedError(
2053 'Unknown file type %r', filetype)
1981 2054
1982 # Report progress. 2055 # Report progress.
1983 duration = time.time() - last_update 2056 duration = time.time() - last_update
1984 if duration > DELAY_BETWEEN_UPDATES_IN_SECS: 2057 if duration > DELAY_BETWEEN_UPDATES_IN_SECS:
1985 msg = '%d files remaining...' % len(remaining) 2058 msg = '%d files remaining...' % len(remaining)
1986 print msg 2059 print msg
1987 logging.info(msg) 2060 logging.info(msg)
1988 last_update = time.time() 2061 last_update = time.time()
1989 2062
1990 # Cache could evict some items we just tried to fetch, it's a fatal error. 2063 # Cache could evict some items we just tried to fetch, it's a fatal error.
(...skipping 316 matching lines...) Expand 10 before | Expand all | Expand 10 after
2307 dispatcher = subcommand.CommandDispatcher(__name__) 2380 dispatcher = subcommand.CommandDispatcher(__name__)
2308 return dispatcher.execute(OptionParserIsolateServer(), args) 2381 return dispatcher.execute(OptionParserIsolateServer(), args)
2309 2382
2310 2383
if __name__ == '__main__':
  subprocess42.inhibit_os_error_reporting()
  fix_encoding.fix_encoding()
  tools.disable_buffering()
  colorama.init()
  # Dispatch to the requested subcommand; its status becomes the exit code.
  sys.exit(main(sys.argv[1:]))
OLDNEW

Powered by Google App Engine
This is Rietveld 408576698