Chromium Code Reviews
chromiumcodereview-hr@appspot.gserviceaccount.com (chromiumcodereview-hr) | Please choose your nickname with Settings | Help | Chromium Project | Gerrit Changes | Sign out
(51)

Side by Side Diff: client/isolateserver.py

Issue 2414543003: isolateserver: DiskCache format v2 (Closed)
Patch Set: docs Created 4 years, 2 months ago
Use n/p to move between diff chunks; N/P to move between comments. Draft comments are only viewable by you.
Jump to:
View unified diff | Download patch
« no previous file with comments | « no previous file | client/tests/isolateserver_test.py » ('j') | no next file with comments »
Toggle Intra-line Diffs ('i') | Expand Comments ('e') | Collapse Comments ('c') | Show Comments Hide Comments ('s')
OLDNEW
1 #!/usr/bin/env python 1 #!/usr/bin/env python
2 # Copyright 2013 The LUCI Authors. All rights reserved. 2 # Copyright 2013 The LUCI Authors. All rights reserved.
3 # Use of this source code is governed under the Apache License, Version 2.0 3 # Use of this source code is governed under the Apache License, Version 2.0
4 # that can be found in the LICENSE file. 4 # that can be found in the LICENSE file.
5 5
6 """Archives a set of files or directories to an Isolate Server.""" 6 """Archives a set of files or directories to an Isolate Server."""
7 7
8 __version__ = '0.6.0' 8 __version__ = '0.6.0'
M-A Ruel 2016/10/13 02:19:37 bump
9 9
10 import base64 10 import base64
11 import errno 11 import errno
12 import functools 12 import functools
13 import io 13 import io
14 import logging 14 import logging
15 import optparse 15 import optparse
16 import os 16 import os
17 import re 17 import re
18 import signal 18 import signal
(...skipping 1455 matching lines...) Expand 10 before | Expand all | Expand 10 after
1474 0, it unconditionally fills the disk. 1474 0, it unconditionally fills the disk.
1475 - max_items: Maximum number of items to keep in the cache. If 0, do not 1475 - max_items: Maximum number of items to keep in the cache. If 0, do not
1476 enforce a limit. 1476 enforce a limit.
1477 """ 1477 """
1478 self.max_cache_size = max_cache_size 1478 self.max_cache_size = max_cache_size
1479 self.min_free_space = min_free_space 1479 self.min_free_space = min_free_space
1480 self.max_items = max_items 1480 self.max_items = max_items
1481 1481
1482 1482
1483 class DiskCache(LocalCache): 1483 class DiskCache(LocalCache):
1484 """Stateful LRU cache in a flat hash table in a directory. 1484 """Stateful LRU cache in a semi-flat hash table in a directory.
1485 1485
1486 Saves its state as json file. 1486 Saves its state as json file.
1487 """ 1487 """
1488 VERSION = 2
1489 VERSION_FILE = u'VERSION'
1488 STATE_FILE = u'state.json' 1490 STATE_FILE = u'state.json'
1489 1491
1490 def __init__(self, cache_dir, policies, hash_algo): 1492 # All protected methods (starting with '_') except _path should be called
1493 # with self._lock held.
1494
1495 def __init__(self, cache_dir, policies, hash_algo, time_fn=None):
1491 """ 1496 """
1492 Arguments: 1497 Arguments:
1493 cache_dir: directory where to place the cache. 1498 cache_dir: directory where to place the cache.
1494 policies: cache retention policies. 1499 policies: cache retention policies.
1495 algo: hashing algorithm used. 1500 algo: hashing algorithm used.
1501 time_fn: function to take current timestamp when adding new items.
1502 Defaults to time.time.
1496 """ 1503 """
1497 # All protected methods (starting with '_') except _path should be called
1498 # with self._lock held.
1499 super(DiskCache, self).__init__() 1504 super(DiskCache, self).__init__()
1505 self.time_fn = time_fn or time.time
1500 self.cache_dir = cache_dir 1506 self.cache_dir = cache_dir
1501 self.policies = policies 1507 self.policies = policies
1502 self.hash_algo = hash_algo 1508 self.hash_algo = hash_algo
1503 self.state_file = os.path.join(cache_dir, self.STATE_FILE) 1509 self.state_file = os.path.join(cache_dir, self.STATE_FILE)
1504 # Items in a LRU lookup dict(digest: size). 1510 self.version_file = os.path.join(cache_dir, self.VERSION_FILE)
M-A Ruel 2016/10/13 02:19:37 I really prefer to keep using a single file, not t
1511 # Items in a LRU lookup dict(digest: [size, timestamp]).
1512 # We use lists instead of tuples because JSON arrays are parsed to lists.
1505 self._lru = lru.LRUDict() 1513 self._lru = lru.LRUDict()
1506 # Current cached free disk space. It is updated by self._trim(). 1514 # Current cached free disk space. It is updated by self._trim().
1507 self._free_disk = 0 1515 self._free_disk = 0
1508 # The first item in the LRU cache that must not be evicted during this run 1516 # The first item in the LRU cache that must not be evicted during this run
1509 # since it was referenced. All items more recent than _protected in the LRU 1517 # since it was referenced. All items more recent than _protected in the LRU
1510 # cache are also inherently protected. It could be a set() of all items 1518 # cache are also inherently protected. It could be a set() of all items
1511 # referenced but this increases memory usage without a use case. 1519 # referenced but this increases memory usage without a use case.
1512 self._protected = None 1520 self._protected = None
1513 # Cleanup operations done by self._load(), if any. 1521 # Cleanup operations done by self._load(), if any.
1514 self._operations = [] 1522 self._operations = []
1523
1524 self._free_disk = file_path.get_free_space(self.cache_dir)
1525
1515 with tools.Profiler('Setup'): 1526 with tools.Profiler('Setup'):
1516 with self._lock: 1527 with self._lock:
1517 # self._load() calls self._trim() which initializes self._free_disk.
1518 self._load() 1528 self._load()
1519 1529
1520 def __contains__(self, digest): 1530 def __contains__(self, digest):
1521 with self._lock: 1531 with self._lock:
1522 return digest in self._lru 1532 return digest in self._lru
1523 1533
1524 def __enter__(self): 1534 def __enter__(self):
1525 return self 1535 return self
1526 1536
1527 def __exit__(self, _exc_type, _exec_value, _traceback): 1537 def __exit__(self, _exc_type, _exec_value, _traceback):
1528 with tools.Profiler('CleanupTrimming'): 1538 with tools.Profiler('CleanupTrimming'):
1529 with self._lock: 1539 with self._lock:
1530 self._trim() 1540 self._trim()
1531 1541
1532 logging.info( 1542 logging.info(
1533 '%5d (%8dkb) added', 1543 '%5d (%8dkb) added',
1534 len(self._added), sum(self._added) / 1024) 1544 len(self._added), sum(self._added) / 1024)
1535 logging.info( 1545 logging.info(
1536 '%5d (%8dkb) current', 1546 '%5d (%8dkb) current',
1537 len(self._lru), 1547 len(self._lru),
1538 sum(self._lru.itervalues()) / 1024) 1548 self._cache_disk_size() / 1024)
1539 logging.info( 1549 logging.info(
1540 '%5d (%8dkb) evicted', 1550 '%5d (%8dkb) evicted',
1541 len(self._evicted), sum(self._evicted) / 1024) 1551 len(self._evicted), sum(self._evicted) / 1024)
1542 logging.info( 1552 logging.info(
1543 ' %8dkb free', 1553 ' %8dkb free',
1544 self._free_disk / 1024) 1554 self._free_disk / 1024)
1545 return False 1555 return False
1546 1556
1557 def _sizes(self):
1558 """Returns an iterator of pairs (digest, size)."""
1559 return (
1560 (digest, size)
1561 for digest, (size, _) in self._lru._items.iteritems())
1562
1547 def cached_set(self): 1563 def cached_set(self):
1548 with self._lock: 1564 with self._lock:
1549 return self._lru.keys_set() 1565 return self._lru.keys_set()
1550 1566
1551 def cleanup(self): 1567 def cleanup(self):
1552 """Cleans up the cache directory. 1568 """Cleans up the cache directory.
1553 1569
1554 Ensures there is no unknown files in cache_dir. 1570 Ensures there is no unknown files in cache_dir.
1555 Ensures the read-only bits are set correctly. 1571 Ensures the read-only bits are set correctly.
1556 1572
1557 At that point, the cache was already loaded, trimmed to respect cache 1573 At that point, the cache was already loaded, trimmed to respect cache
1558 policies. 1574 policies.
1559 """ 1575 """
1576 def try_remove(filename):
1577 assert os.path.isabs(filename)
1578 logging.warning('Removing unknown file %s from cache', filename)
1579 if fs.isdir(filename):
1580 try:
1581 file_path.rmtree(filename)
1582 except OSError:
1583 pass
1584 else:
1585 file_path.try_remove(filename)
1586
1560 fs.chmod(self.cache_dir, 0700) 1587 fs.chmod(self.cache_dir, 0700)
1561 # Ensure that all files listed in the state still exist and add new ones. 1588 # Ensure that all files listed in the state still exist and add new ones.
1562 previous = self._lru.keys_set() 1589 previous = self._lru.keys_set()
1563 # It'd be faster if there were a readdir() function. 1590 # It'd be faster if there were a readdir() function.
1564 for filename in fs.listdir(self.cache_dir): 1591 for filename_l1 in fs.listdir(self.cache_dir):
1565 if filename == self.STATE_FILE: 1592 full_name_l1 = os.path.join(self.cache_dir, filename_l1)
1566 fs.chmod(os.path.join(self.cache_dir, filename), 0600) 1593 if filename_l1 in (self.STATE_FILE, self.VERSION_FILE):
1594 fs.chmod(full_name_l1, 0600)
1567 continue 1595 continue
1568 if filename in previous: 1596 if len(filename_l1) != 2:
1569 fs.chmod(os.path.join(self.cache_dir, filename), 0400) 1597 try_remove(full_name_l1)
1570 previous.remove(filename)
1571 continue 1598 continue
1572 1599 for filename_l2 in fs.listdir(full_name_l1):
1573 # An untracked file. Delete it. 1600 digest = filename_l1 + filename_l2
1574 logging.warning('Removing unknown file %s from cache', filename) 1601 full_name_l2 = os.path.join(full_name_l1, filename_l2)
1575 p = self._path(filename) 1602 if digest in previous:
1576 if fs.isdir(p): 1603 fs.chmod(full_name_l2, 0400)
1577 try: 1604 previous.remove(digest)
1578 file_path.rmtree(p) 1605 else:
1579 except OSError: 1606 try_remove(full_name_l2)
1580 pass
1581 else:
1582 file_path.try_remove(p)
1583 continue
1584 1607
1585 if previous: 1608 if previous:
1586 # Filter out entries that were not found. 1609 # Filter out entries that were not found.
1587 logging.warning('Removed %d lost files', len(previous)) 1610 logging.warning('Removed %d lost files', len(previous))
1588 for filename in previous: 1611 for digest in previous:
1589 self._lru.pop(filename) 1612 self._lru.pop(digest)
1590 1613
1591 # What remains to be done is to hash every single item to 1614 # What remains to be done is to hash every single item to
1592 # detect corruption, then save to ensure state.json is up to date. 1615 # detect corruption, then save to ensure state.json is up to date.
1593 # Sadly, on a 50Gb cache with 100mib/s I/O, this is still over 8 minutes. 1616 # Sadly, on a 50Gb cache with 100mib/s I/O, this is still over 8 minutes.
1594 # TODO(maruel): Let's revisit once directory metadata is stored in 1617 # TODO(maruel): Let's revisit once directory metadata is stored in
1595 # state.json so only the files that had been mapped since the last cleanup() 1618 # state.json so only the files that had been mapped since the last cleanup()
1596 # call are manually verified. 1619 # call are manually verified.
1597 # 1620 #
1598 #with self._lock: 1621 #with self._lock:
1599 # for digest in self._lru: 1622 # for digest in self._lru:
(...skipping 11 matching lines...) Expand all
1611 TODO(maruel): More stringent verification while keeping the check fast. 1634 TODO(maruel): More stringent verification while keeping the check fast.
1612 """ 1635 """
1613 # Do the check outside the lock. 1636 # Do the check outside the lock.
1614 if not is_valid_file(self._path(digest), size): 1637 if not is_valid_file(self._path(digest), size):
1615 return False 1638 return False
1616 1639
1617 # Update its LRU position. 1640 # Update its LRU position.
1618 with self._lock: 1641 with self._lock:
1619 if digest not in self._lru: 1642 if digest not in self._lru:
1620 return False 1643 return False
1621 self._lru.touch(digest) 1644 self._lru.add(digest, [size, self.time_fn()])
M-A Ruel 2016/10/13 02:19:37 Please remove the touch method from LRUDict to be
1622 self._protected = self._protected or digest 1645 self._protected = self._protected or digest
1623 return True 1646 return True
1624 1647
1625 def evict(self, digest): 1648 def evict(self, digest):
1626 with self._lock: 1649 with self._lock:
1627 # Do not check for 'digest == self._protected' since it could be because 1650 # Do not check for 'digest == self._protected' since it could be because
1628 # the object is corrupted. 1651 # the object is corrupted.
1629 self._lru.pop(digest) 1652 self._lru.pop(digest)
1630 self._delete_file(digest, UNKNOWN_FILE_SIZE) 1653 self._delete_file(digest, UNKNOWN_FILE_SIZE)
1631 1654
1632 def getfileobj(self, digest): 1655 def getfileobj(self, digest):
1633 try: 1656 try:
1634 f = fs.open(self._path(digest), 'rb') 1657 f = fs.open(self._path(digest), 'rb')
1635 with self._lock: 1658 with self._lock:
1636 self._used.append(self._lru[digest]) 1659 self._used.append(self._lru[digest][0])
1637 return f 1660 return f
1638 except IOError: 1661 except IOError:
1639 raise CacheMiss(digest) 1662 raise CacheMiss(digest)
1640 1663
1641 def write(self, digest, content): 1664 def write(self, digest, content):
1642 assert content is not None 1665 assert content is not None
1643 with self._lock: 1666 with self._lock:
1644 self._protected = self._protected or digest 1667 self._protected = self._protected or digest
1645 path = self._path(digest) 1668 path = self._path(digest)
1646 # A stale broken file may remain. It is possible for the file to have write 1669 # A stale broken file may remain. It is possible for the file to have write
(...skipping 11 matching lines...) Expand all
1658 file_path.try_remove(path) 1681 file_path.try_remove(path)
1659 raise 1682 raise
1660 # Make the file read-only in the cache. This has a few side-effects since 1683 # Make the file read-only in the cache. This has a few side-effects since
1661 # the file node is modified, so every directory entries to this file becomes 1684 # the file node is modified, so every directory entries to this file becomes
1662 # read-only. It's fine here because it is a new file. 1685 # read-only. It's fine here because it is a new file.
1663 file_path.set_read_only(path, True) 1686 file_path.set_read_only(path, True)
1664 with self._lock: 1687 with self._lock:
1665 self._add(digest, size) 1688 self._add(digest, size)
1666 return digest 1689 return digest
1667 1690
1691 def _cache_disk_size(self):
1692 """Returns number of bytes that cache files take."""
1693 return sum(size for (size, _) in self._lru.itervalues())
1694
1668 def _load(self): 1695 def _load(self):
1669 """Loads state of the cache from json file. 1696 """Loads state of the cache from json file.
1670 1697
1671 If cache_dir does not exist on disk, it is created. 1698 If cache_dir does not exist on disk, it is created.
1672 """ 1699 """
1673 self._lock.assert_locked() 1700 self._lock.assert_locked()
1674 1701
1675 if not fs.isfile(self.state_file): 1702 # Read version file.
1676 if not os.path.isdir(self.cache_dir): 1703 version = None
1677 fs.makedirs(self.cache_dir) 1704 try:
1705 with fs.open(self.version_file, 'r') as f:
1706 version = f.read()
1707 except IOError:
1708 pass
1709 else:
1710 try:
1711 version = int(version)
1712 except ValueError:
1713 logging.error('%s is corrupted: not an integer', self.version_file)
1714
1715 if version != self.VERSION:
1716 # Possibly, cache dir is in the old format.
1717 file_path.try_remove(self.cache_dir)
1718 self._lru = lru.LRUDict()
1719 self._save() # create state.json
1678 else: 1720 else:
1679 # Load state of the cache. 1721 # Load state of the cache.
1680 try: 1722 try:
1681 self._lru = lru.LRUDict.load(self.state_file) 1723 self._lru = lru.LRUDict.load(self.state_file)
1682 except ValueError as err: 1724 except ValueError as err:
1683 logging.error('Failed to load cache state: %s' % (err,)) 1725 logging.error('Failed to load cache state: %s' % (err,))
1684 # Don't want to keep broken state file. 1726 # Don't want to keep broken state file.
1685 file_path.try_remove(self.state_file) 1727 file_path.try_remove(self.state_file)
1686 self._trim() 1728 self._trim()
1687 # We want the initial cache size after trimming, i.e. what is readily 1729 # We want the initial cache size after trimming, i.e. what is readily
1688 # available. 1730 # available.
1689 self._initial_number_items = len(self._lru) 1731 self._initial_number_items = len(self._lru)
1690 self._initial_size = sum(self._lru.itervalues()) 1732 self._initial_size = self._cache_disk_size()
1691 if self._evicted: 1733 if self._evicted:
1692 logging.info( 1734 logging.info(
1693 'Trimming evicted items with the following sizes: %s', 1735 'Trimming evicted items with the following sizes: %s',
1694 sorted(self._evicted)) 1736 sorted(self._evicted))
1695 1737
1696 def _save(self): 1738 def _save(self):
1697 """Saves the LRU ordering.""" 1739 """Saves the LRU ordering."""
1698 self._lock.assert_locked() 1740 self._lock.assert_locked()
1699 if sys.platform != 'win32': 1741 if sys.platform != 'win32':
1700 d = os.path.dirname(self.state_file) 1742 d = os.path.dirname(self.state_file)
1701 if fs.isdir(d): 1743 if fs.isdir(d):
1702 # Necessary otherwise the file can't be created. 1744 # Necessary otherwise the file can't be created.
1703 file_path.set_read_only(d, False) 1745 file_path.set_read_only(d, False)
1746
1747 if fs.isfile(self.version_file):
1748 file_path.set_read_only(self.version_file, False)
1749 file_write(self.version_file, [str(self.VERSION)])
1750
1704 if fs.isfile(self.state_file): 1751 if fs.isfile(self.state_file):
1705 file_path.set_read_only(self.state_file, False) 1752 file_path.set_read_only(self.state_file, False)
1706 self._lru.save(self.state_file) 1753 self._lru.save(self.state_file)
1707 1754
1708 def _trim(self): 1755 def _trim(self):
1709 """Trims anything we don't know, make sure enough free space exists.""" 1756 """Trims anything we don't know, make sure enough free space exists."""
1710 self._lock.assert_locked() 1757 self._lock.assert_locked()
1711 1758
1712 # Ensure maximum cache size. 1759 # Ensure maximum cache size.
1713 if self.policies.max_cache_size: 1760 if self.policies.max_cache_size:
1714 total_size = sum(self._lru.itervalues()) 1761 total_size = self._cache_disk_size()
1715 while total_size > self.policies.max_cache_size: 1762 while total_size > self.policies.max_cache_size:
1716 total_size -= self._remove_lru_file(True) 1763 total_size -= self._remove_lru_file(True)
1717 1764
1718 # Ensure maximum number of items in the cache. 1765 # Ensure maximum number of items in the cache.
1719 if self.policies.max_items and len(self._lru) > self.policies.max_items: 1766 if self.policies.max_items and len(self._lru) > self.policies.max_items:
1720 for _ in xrange(len(self._lru) - self.policies.max_items): 1767 for _ in xrange(len(self._lru) - self.policies.max_items):
1721 self._remove_lru_file(True) 1768 self._remove_lru_file(True)
1722 1769
1723 # Ensure enough free space. 1770 # Ensure enough free space.
1724 self._free_disk = file_path.get_free_space(self.cache_dir) 1771 self._free_disk = file_path.get_free_space(self.cache_dir)
1725 trimmed_due_to_space = 0 1772 trimmed_due_to_space = 0
1726 while ( 1773 while (
1727 self.policies.min_free_space and 1774 self.policies.min_free_space and
1728 self._lru and 1775 self._lru and
1729 self._free_disk < self.policies.min_free_space): 1776 self._free_disk < self.policies.min_free_space):
1730 trimmed_due_to_space += 1 1777 trimmed_due_to_space += 1
1731 self._remove_lru_file(True) 1778 self._remove_lru_file(True)
1732 1779
1733 if trimmed_due_to_space: 1780 if trimmed_due_to_space:
1734 total_usage = sum(self._lru.itervalues()) 1781 total_usage = self._cache_disk_size()
1735 usage_percent = 0. 1782 usage_percent = 0.
1736 if total_usage: 1783 if total_usage:
1737 usage_percent = 100. * float(total_usage) / self.policies.max_cache_size 1784 usage_percent = 100. * float(total_usage) / self.policies.max_cache_size
1738 1785
1739 logging.warning( 1786 logging.warning(
1740 'Trimmed %s file(s) due to not enough free disk space: %.1fkb free,' 1787 'Trimmed %s file(s) due to not enough free disk space: %.1fkb free,'
1741 ' %.1fkb cache (%.1f%% of its maximum capacity of %.1fkb)', 1788 ' %.1fkb cache (%.1f%% of its maximum capacity of %.1fkb)',
1742 trimmed_due_to_space, 1789 trimmed_due_to_space,
1743 self._free_disk / 1024., 1790 self._free_disk / 1024.,
1744 total_usage / 1024., 1791 total_usage / 1024.,
1745 usage_percent, 1792 usage_percent,
1746 self.policies.max_cache_size / 1024.) 1793 self.policies.max_cache_size / 1024.)
1747 self._save() 1794 self._save()
1748 1795
1749 def _path(self, digest): 1796 def _path(self, digest):
1750 """Returns the path to one item.""" 1797 """Returns the path to one item."""
1751 return os.path.join(self.cache_dir, digest) 1798 assert len(digest) > 2
1799 return os.path.join(self.cache_dir, digest[:2], digest[2:])
1752 1800
1753 def _remove_lru_file(self, allow_protected): 1801 def _remove_lru_file(self, allow_protected):
1754 """Removes the least recently used file and returns its size.""" 1802 """Removes the least recently used file and returns its size."""
1755 self._lock.assert_locked() 1803 self._lock.assert_locked()
1804
1756 try: 1805 try:
1757 digest, size = self._lru.get_oldest() 1806 digest, _ = self._lru.get_oldest()
1758 if not allow_protected and digest == self._protected: 1807 if not allow_protected and digest == self._protected:
1759 raise Error('Not enough space to map the whole isolated tree') 1808 raise Error('Not enough space to map the whole isolated tree')
1760 except KeyError: 1809 except KeyError:
1761 raise Error('Nothing to remove') 1810 raise Error('Nothing to remove')
1762 digest, size = self._lru.pop_oldest() 1811
1812 digest, (size, _) = self._lru.pop_oldest()
1763 logging.debug("Removing LRU file %s", digest) 1813 logging.debug("Removing LRU file %s", digest)
1764 self._delete_file(digest, size) 1814 self._delete_file(digest, size)
1765 return size 1815 return size
1766 1816
1767 def _add(self, digest, size=UNKNOWN_FILE_SIZE): 1817 def _add(self, digest, size=UNKNOWN_FILE_SIZE):
1768 """Adds an item into LRU cache marking it as a newest one.""" 1818 """Adds an item into LRU cache marking it as a newest one.
1819
1820 Assumes the file exists.
1821 """
1769 self._lock.assert_locked() 1822 self._lock.assert_locked()
1770 if size == UNKNOWN_FILE_SIZE: 1823 if size == UNKNOWN_FILE_SIZE:
1771 size = fs.stat(self._path(digest)).st_size 1824 size = fs.stat(self._path(digest)).st_size
1772 self._added.append(size) 1825 self._added.append(size)
1773 self._lru.add(digest, size) 1826 self._lru.add(digest, [size, self.time_fn()])
1774 self._free_disk -= size 1827 self._free_disk -= size
1775 # Do a quicker version of self._trim(). It only enforces free disk space, 1828 # Do a quicker version of self._trim(). It only enforces free disk space,
1776 # not cache size limits. It doesn't actually look at real free disk space, 1829 # not cache size limits. It doesn't actually look at real free disk space,
1777 # only uses its cache values. self._trim() will be called later to enforce 1830 # only uses its cache values. self._trim() will be called later to enforce
1778 # real trimming but doing this quick version here makes it possible to map 1831 # real trimming but doing this quick version here makes it possible to map
1779 # an isolated that is larger than the current amount of free disk space when 1832 # an isolated that is larger than the current amount of free disk space when
1780 # the cache size is already large. 1833 # the cache size is already large.
1781 while ( 1834 while (
1782 self.policies.min_free_space and 1835 self.policies.min_free_space and
1783 self._lru and 1836 self._lru and
1784 self._free_disk < self.policies.min_free_space): 1837 self._free_disk < self.policies.min_free_space):
1785 self._remove_lru_file(False) 1838 self._remove_lru_file(False)
1786 1839
1787 def _delete_file(self, digest, size=UNKNOWN_FILE_SIZE): 1840 def _delete_file(self, digest, size=UNKNOWN_FILE_SIZE):
1788 """Deletes cache file from the file system.""" 1841 """Deletes cache file from the file system."""
1789 self._lock.assert_locked() 1842 self._lock.assert_locked()
1790 try: 1843 try:
1791 if size == UNKNOWN_FILE_SIZE: 1844 if size == UNKNOWN_FILE_SIZE:
1792 size = fs.stat(self._path(digest)).st_size 1845 size = fs.stat(self._path(digest)).st_size
1793 file_path.try_remove(self._path(digest)) 1846 path = self._path(digest)
1847 file_path.try_remove(path)
1794 self._evicted.append(size) 1848 self._evicted.append(size)
1795 self._free_disk += size 1849 self._free_disk += size
1850
1851 dir = os.path.dirname(path)
1852 if len(os.listdir(dir)) == 0:
1853 fs.rmtree(dir)
1796 except OSError as e: 1854 except OSError as e:
1797 logging.error('Error attempting to delete a file %s:\n%s' % (digest, e)) 1855 logging.error('Error attempting to delete a file %s:\n%s' % (digest, e))
1798 1856
1799 1857
1800 class IsolatedBundle(object): 1858 class IsolatedBundle(object):
1801 """Fetched and parsed .isolated file with all dependencies.""" 1859 """Fetched and parsed .isolated file with all dependencies."""
1802 1860
1803 def __init__(self): 1861 def __init__(self):
1804 self.command = [] 1862 self.command = []
1805 self.files = {} 1863 self.files = {}
(...skipping 625 matching lines...) Expand 10 before | Expand all | Expand 10 after
2431 return dispatcher.execute(OptionParserIsolateServer(), args) 2489 return dispatcher.execute(OptionParserIsolateServer(), args)
2432 2490
2433 2491
2434 if __name__ == '__main__': 2492 if __name__ == '__main__':
2435 subprocess42.inhibit_os_error_reporting() 2493 subprocess42.inhibit_os_error_reporting()
2436 fix_encoding.fix_encoding() 2494 fix_encoding.fix_encoding()
2437 tools.disable_buffering() 2495 tools.disable_buffering()
2438 colorama.init() 2496 colorama.init()
2439 file_path.enable_symlink() 2497 file_path.enable_symlink()
2440 sys.exit(main(sys.argv[1:])) 2498 sys.exit(main(sys.argv[1:]))
OLDNEW
« no previous file with comments | « no previous file | client/tests/isolateserver_test.py » ('j') | no next file with comments »

Powered by Google App Engine
This is Rietveld 408576698