Chromium Code Reviews

Unified Diff: net/disk_cache/backend_impl.cc

Issue 8065015: Disk Cache: Improve handling of dirty entries. (Closed)
Base URL: svn://svn.chromium.org/chrome/trunk/src/
Patch Set: Created 9 years, 2 months ago
 // Copyright (c) 2011 The Chromium Authors. All rights reserved.
 // Use of this source code is governed by a BSD-style license that can be
 // found in the LICENSE file.

 #include "net/disk_cache/backend_impl.h"

 #include "base/file_path.h"
 #include "base/file_util.h"
 #include "base/message_loop.h"
 #include "base/metrics/field_trial.h"
(...skipping 742 matching lines...)
     DCHECK(!error);
     if (parent_entry) {
       parent.swap(&parent_entry);
     } else if (data_->table[hash & mask_]) {
       // We should have corrected the problem.
       NOTREACHED();
       return NULL;
     }
   }

+  // The general flow is to allocate disk space and initialize the entry data,
+  // followed by saving that to disk, then linking the entry though the index
+  // and finally thorugh the lists. If there is a crash in this process, we may
gavinp 2011/09/29 13:25:34 typo: thorugh -> through
+  // end up with:
+  // a. Used, unreferenced empty blocks on disk (basically just garbage).
+  // b. Used, unreferenced but meaningful data on disk (more garbage).
+  // c. A fully formed entry, reachable only through the index.
+  // d. A fully formed entry, also reachable through the lists, but still dirty.
+  //
+  // Anything after (b) can be automatically cleaned up. We may consider saving
+  // the current operation (as we do while manipulating the lists) so that we
+  // can detect and cleanup (a) and (b).
+
   int num_blocks = EntryImpl::NumBlocksForEntry(key.size());
   if (!block_files_.CreateBlock(BLOCK_256, num_blocks, &entry_address)) {
     LOG(ERROR) << "Create entry failed " << key.c_str();
     stats_.OnEvent(Stats::CREATE_ERROR);
     return NULL;
   }

   Addr node_address(0);
   if (!block_files_.CreateBlock(RANKINGS, 1, &node_address)) {
     block_files_.DeleteBlock(entry_address, false);
(...skipping 12 matching lines...)
     LOG(ERROR) << "Create entry failed " << key.c_str();
     stats_.OnEvent(Stats::CREATE_ERROR);
     return NULL;
   }

   cache_entry->BeginLogging(net_log_, true);

   // We are not failing the operation; let's add this to the map.
   open_entries_[entry_address.value()] = cache_entry;

-  if (parent.get())
-    parent->SetNextAddress(entry_address);
-
+  // Save the entry.
   block_files_.GetFile(entry_address)->Store(cache_entry->entry());
   block_files_.GetFile(node_address)->Store(cache_entry->rankings());
+  IncreaseNumEntries();
+  entry_count_++;

-  IncreaseNumEntries();
+  // Link this entry through the index.
+  if (parent.get()) {
+    parent->SetNextAddress(entry_address);
+  } else {
+    data_->table[hash & mask_] = entry_address.value();
+  }
+
+  // Link this entry through the lists.
   eviction_.OnCreateEntry(cache_entry);
-  entry_count_++;
-  if (!parent.get())
-    data_->table[hash & mask_] = entry_address.value();

   CACHE_UMA(AGE_MS, "CreateTime", GetSizeGroup(), start);
   stats_.OnEvent(Stats::CREATE_HIT);
   SIMPLE_STATS_COUNTER("disk_cache.miss");
   Trace("create entry hit ");
   return cache_entry.release();
 }

 EntryImpl* BackendImpl::OpenNextEntryImpl(void** iter) {
   return OpenFollowingEntry(true, iter);
(...skipping 739 matching lines...)
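The CreateEntryImpl hunk above reorders the create path so the entry and rankings data are stored on disk before the entry is linked through the index and then through the LRU lists, which is what bounds a crash to the states (a) through (d) listed in the new comment. Below is a minimal illustrative sketch of that ordering; FakeCache, CreateEntrySketch and the boolean fields are hypothetical stand-ins, not part of the patch or of the disk_cache code.

// Illustrative sketch only -- all names here (FakeCache, CreateEntrySketch)
// are hypothetical and do not exist in disk_cache.
#include <cstdio>

struct FakeCache {
  bool blocks_allocated = false;   // state (a): unreferenced empty blocks
  bool entry_data_stored = false;  // state (b): unreferenced, meaningful data
  bool linked_in_index = false;    // state (c): reachable only via the index
  bool linked_in_lists = false;    // state (d): fully linked, but still dirty
};

// Mirrors the order used by the patched CreateEntryImpl: allocate blocks,
// store the entry, link it through the index, then link it through the lists.
void CreateEntrySketch(FakeCache* cache) {
  cache->blocks_allocated = true;   // a crash after this leaves state (a)
  cache->entry_data_stored = true;  // a crash after this leaves state (b)
  cache->linked_in_index = true;    // a crash after this leaves state (c)
  cache->linked_in_lists = true;    // a crash after this leaves state (d)
}

int main() {
  FakeCache cache;
  CreateEntrySketch(&cache);
  std::printf("reachable through the index: %d\n", cache.linked_in_index);
  return 0;
}

Only (c) and (d) leave an entry that later code can find and repair; (a) and (b) are plain garbage, which is why the comment suggests recording the current operation if those states are ever to be detected.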
     LOG(WARNING) << "Messed up entry found.";
     return ERR_INVALID_ENTRY;
   }

   if (!cache_entry->LoadNodeAddress())
     return ERR_READ_FAILURE;

   // Prevent overwriting the dirty flag on the destructor.
   cache_entry->SetDirtyFlag(GetCurrentEntryId());

-  if (!rankings_.SanityCheck(cache_entry->rankings(), false))
-    return ERR_INVALID_LINKS;
+  if (!rankings_.SanityCheck(cache_entry->rankings(), false)) {
+    cache_entry->SetDirtyFlag(0);
+    // Don't remove this from the list (it is not linked properly). Instead,
+    // break the link back to the entry because it is going away, and leave the
+    // rankings node to be deleted if we find it through a list.
+    rankings_.SetContents(cache_entry->rankings(), 0);
+  } else if (!rankings_.DataSanityCheck(cache_entry->rankings(), false)) {
+    cache_entry->SetDirtyFlag(0);
+    rankings_.SetContents(cache_entry->rankings(), address.value());
+  }
+
+  if (!cache_entry->DataSanityCheck()) {
+    LOG(WARNING) << "Messed up entry found.";
+    cache_entry->SetDirtyFlag(0);
+    cache_entry->FixForDelete();
+  }

   if (cache_entry->dirty()) {
     Trace("Dirty entry 0x%p 0x%x", reinterpret_cast<void*>(cache_entry.get()),
           address.value());
   }

   open_entries_[address.value()] = cache_entry;

   cache_entry->BeginLogging(net_log_, false);
   cache_entry.swap(entry);
(...skipping 129 matching lines...)
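The NewEntry hunk above replaces the old hard failure (ERR_INVALID_LINKS) with targeted repairs: a rankings node with bad links is detached from its entry, a node with bad data is pointed back at the entry, and an entry with bad data is patched just enough to be deleted safely. A simplified sketch of those decisions, using hypothetical FakeEntry/FakeRankingsNode types rather than the real EntryImpl and Rankings classes:

// Illustrative sketch only -- FakeEntry and FakeRankingsNode are hypothetical
// stand-ins; only the branching mirrors the two independent checks in the
// patched NewEntry.
#include <cstdint>
#include <cstdio>

struct FakeRankingsNode {
  uint32_t contents = 0;  // address of the entry the node points back to
};

struct FakeEntry {
  uint32_t address = 0x1234;   // cache address of the entry itself
  int dirty_id = 42;           // in this sketch, nonzero means "dirty"
  bool links_ok = true;        // rankings_.SanityCheck() in the real code
  bool node_data_ok = true;    // rankings_.DataSanityCheck()
  bool entry_data_ok = true;   // cache_entry->DataSanityCheck()
  FakeRankingsNode node;
};

void RepairSketch(FakeEntry* e) {
  if (!e->links_ok) {
    // Bad links: leave the node on the list, but break its link back to the
    // entry so a later list walk can delete the orphaned node.
    e->dirty_id = 0;
    e->node.contents = 0;
  } else if (!e->node_data_ok) {
    // Bad node payload: restore the pointer back to the entry.
    e->dirty_id = 0;
    e->node.contents = e->address;
  }
  if (!e->entry_data_ok) {
    // Bad entry payload: patch it just enough that deleting it is safe
    // (FixForDelete() in the real code).
    e->dirty_id = 0;
  }
}

int main() {
  FakeEntry entry;
  entry.links_ok = false;
  RepairSketch(&entry);
  std::printf("node contents after repair: %u\n", entry.node.contents);
  return 0;
}

The rankings checks and the entry-data check are independent, so a single broken entry can receive more than one of these repairs.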
     return NULL;
   } else {
     // Get the next entry from the last list, and the actual entries for the
     // elements on the other lists.
     for (int i = 0; i < kListsToSearch; i++) {
       EntryImpl* temp = NULL;
       if (iterator->list == i) {
         OpenFollowingEntryFromList(forward, iterator->list,
                                    &iterator->nodes[i], &temp);
       } else {
-        temp = GetEnumeratedEntry(iterator->nodes[i]);
+        temp = GetEnumeratedEntry(iterator->nodes[i],
+                                  static_cast<Rankings::List>(i));
       }

       entries[i].swap(&temp); // The entry was already addref'd.
     }
   }

   int newest = -1;
   int oldest = -1;
   Time access_times[kListsToSearch];
   for (int i = 0; i < kListsToSearch; i++) {
(...skipping 36 matching lines...)
   if (!new_eviction_ && Rankings::NO_USE != list)
     return false;

   Rankings::ScopedRankingsBlock rankings(&rankings_, *from_entry);
   CacheRankingsBlock* next_block = forward ?
       rankings_.GetNext(rankings.get(), list) :
       rankings_.GetPrev(rankings.get(), list);
   Rankings::ScopedRankingsBlock next(&rankings_, next_block);
   *from_entry = NULL;

-  *next_entry = GetEnumeratedEntry(next.get());
+  *next_entry = GetEnumeratedEntry(next.get(), list);
   if (!*next_entry)
     return false;

   *from_entry = next.release();
   return true;
 }

-EntryImpl* BackendImpl::GetEnumeratedEntry(CacheRankingsBlock* next) {
+EntryImpl* BackendImpl::GetEnumeratedEntry(CacheRankingsBlock* next,
+                                           Rankings::List list) {
   if (!next || disabled_)
     return NULL;

   EntryImpl* entry;
-  if (NewEntry(Addr(next->Data()->contents), &entry)) {
-    // TODO(rvargas) bug 73102: We should remove this node from the list, and
-    // maybe do a better cleanup.
+  int rv = NewEntry(Addr(next->Data()->contents), &entry);
+  if (rv) {
+    rankings_.Remove(next, list, false);
+    if (rv == ERR_INVALID_ADDRESS) {
+      // There is nothing linked from the index. Delete the rankings node.
+      DeleteBlock(next->address(), true);
+    }
     return NULL;
   }

   if (entry->dirty()) {
     // We cannot trust this entry.
     InternalDoomEntry(entry);
     entry->Release();
     return NULL;
   }

(...skipping 321 matching lines...)
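The GetEnumeratedEntry hunk above resolves the old TODO: when NewEntry fails, the rankings node is now removed from the list that produced it, and if the failure is ERR_INVALID_ADDRESS (nothing is linked from the index) its block is deleted as well. A rough sketch of that branching, with hypothetical types and made-up error values standing in for the real disk_cache error codes:

// Illustrative sketch only -- FakeBackend is hypothetical and the error
// values are invented; only the branching mirrors the patched code.
#include <cstdio>

enum SketchError { OK = 0, ERR_READ_FAILURE = -1, ERR_INVALID_ADDRESS = -2 };

struct FakeBackend {
  int removed_from_list = 0;  // stands in for rankings_.Remove(next, list)
  int deleted_blocks = 0;     // stands in for DeleteBlock(next->address())

  // Returns nullptr on failure after cleaning up the broken rankings node.
  const char* GetEnumeratedEntrySketch(int new_entry_result) {
    if (new_entry_result != OK) {
      // Always unlink the node from the list it was found on.
      ++removed_from_list;
      if (new_entry_result == ERR_INVALID_ADDRESS) {
        // Nothing is linked from the index, so the node itself is orphaned
        // garbage: release its block as well.
        ++deleted_blocks;
      }
      return nullptr;
    }
    return "entry";
  }
};

int main() {
  FakeBackend backend;
  backend.GetEnumeratedEntrySketch(ERR_INVALID_ADDRESS);
  std::printf("removed=%d deleted=%d\n", backend.removed_from_list,
              backend.deleted_blocks);
  return 0;
}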
     if (total_memory > kMaxBuffersSize || total_memory <= 0)
       total_memory = kMaxBuffersSize;

     done = true;
   }

   return static_cast<int>(total_memory);
 }

 }  // namespace disk_cache