Chromium Code Reviews
chromiumcodereview-hr@appspot.gserviceaccount.com (chromiumcodereview-hr) | Please choose your nickname with Settings | Help | Chromium Project | Gerrit Changes | Sign out
(66)

Side by Side Diff: net/disk_cache/memory/mem_backend_impl.cc

Issue 1715833002: Reland: Refactor and shorten in-memory cache. (Closed) Base URL: https://chromium.googlesource.com/chromium/src.git@master
Patch Set: fix typo in comment Created 4 years, 10 months ago
Use n/p to move between diff chunks; N/P to move between comments. Draft comments are only viewable by you.
Jump to:
View unified diff | Download patch
OLDNEW
1 // Copyright (c) 2012 The Chromium Authors. All rights reserved. 1 // Copyright (c) 2012 The Chromium Authors. All rights reserved.
2 // Use of this source code is governed by a BSD-style license that can be 2 // Use of this source code is governed by a BSD-style license that can be
3 // found in the LICENSE file. 3 // found in the LICENSE file.
4 4
5 #include "net/disk_cache/memory/mem_backend_impl.h" 5 #include "net/disk_cache/memory/mem_backend_impl.h"
6 6
7 #include <algorithm>
8 #include <functional>
7 #include <utility> 9 #include <utility>
8 10
9 #include "base/logging.h" 11 #include "base/logging.h"
10 #include "base/sys_info.h" 12 #include "base/sys_info.h"
11 #include "net/base/net_errors.h" 13 #include "net/base/net_errors.h"
12 #include "net/disk_cache/cache_util.h" 14 #include "net/disk_cache/cache_util.h"
13 #include "net/disk_cache/memory/mem_entry_impl.h" 15 #include "net/disk_cache/memory/mem_entry_impl.h"
14 16
15 using base::Time; 17 using base::Time;
16 18
namespace disk_cache {

namespace {

// Fallback cache budget used when the amount of physical memory cannot be
// determined; also the basis for the 5x cap computed in Init().
const int kDefaultInMemoryCacheSize = 10 * 1024 * 1024;
// Amount of storage (10% of the default budget) that one eviction pass tries
// to free below |max_size_|, so eviction does not run on every insertion.
const int kDefaultEvictionSize = kDefaultInMemoryCacheSize / 10;
22 int LowWaterAdjust(int high_water) { 26 bool CheckLRUListOrder(const base::LinkedList<MemEntryImpl>& lru_list) {
23 if (high_water < kCleanUpMargin) 27 // TODO(gavinp): Check MemBackendImpl::current_size_ here as well.
24 return 0; 28 base::Time previous_last_use_time;
25 29 for (base::LinkNode<MemEntryImpl>* node = lru_list.head();
26 return high_water - kCleanUpMargin; 30 node != lru_list.end(); node = node->next()) {
31 if (node->value()->GetLastUsed() < previous_last_use_time)
32 return false;
33 previous_last_use_time = node->value()->GetLastUsed();
34 }
35 return true;
27 } 36 }
28 37
29 } // namespace 38 } // namespace
30 39
31 namespace disk_cache {
32
33 MemBackendImpl::MemBackendImpl(net::NetLog* net_log) 40 MemBackendImpl::MemBackendImpl(net::NetLog* net_log)
34 : max_size_(0), current_size_(0), net_log_(net_log), weak_factory_(this) { 41 : max_size_(0), current_size_(0), net_log_(net_log), weak_factory_(this) {
35 } 42 }
36 43
// Dooming an entry removes it from |entries_|, so repeatedly dooming the
// first element drains the map; all accounted storage must be released by
// the time the map is empty.
MemBackendImpl::~MemBackendImpl() {
  DCHECK(CheckLRUListOrder(lru_list_));
  while (!entries_.empty())
    entries_.begin()->second->Doom();
  DCHECK(!current_size_);
}
45 50
46 // Static. 51 // static
47 scoped_ptr<Backend> MemBackendImpl::CreateBackend(int max_bytes, 52 scoped_ptr<Backend> MemBackendImpl::CreateBackend(int max_bytes,
48 net::NetLog* net_log) { 53 net::NetLog* net_log) {
49 scoped_ptr<MemBackendImpl> cache(new MemBackendImpl(net_log)); 54 scoped_ptr<MemBackendImpl> cache(new MemBackendImpl(net_log));
50 cache->SetMaxSize(max_bytes); 55 cache->SetMaxSize(max_bytes);
51 if (cache->Init()) 56 if (cache->Init())
52 return std::move(cache); 57 return std::move(cache);
53 58
54 LOG(ERROR) << "Unable to create cache"; 59 LOG(ERROR) << "Unable to create cache";
55 return nullptr; 60 return nullptr;
56 } 61 }
57 62
58 bool MemBackendImpl::Init() { 63 bool MemBackendImpl::Init() {
59 if (max_size_) 64 if (max_size_)
60 return true; 65 return true;
61 66
62 int64_t total_memory = base::SysInfo::AmountOfPhysicalMemory(); 67 int64_t total_memory = base::SysInfo::AmountOfPhysicalMemory();
63 68
64 if (total_memory <= 0) { 69 if (total_memory <= 0) {
65 max_size_ = kDefaultInMemoryCacheSize; 70 max_size_ = kDefaultInMemoryCacheSize;
66 return true; 71 return true;
67 } 72 }
68 73
69 // We want to use up to 2% of the computer's memory, with a limit of 50 MB, 74 // We want to use up to 2% of the computer's memory, with a limit of 50 MB,
70 // reached on systemd with more than 2.5 GB of RAM. 75 // reached on system with more than 2.5 GB of RAM.
71 total_memory = total_memory * 2 / 100; 76 total_memory = total_memory * 2 / 100;
72 if (total_memory > kDefaultInMemoryCacheSize * 5) 77 if (total_memory > kDefaultInMemoryCacheSize * 5)
73 max_size_ = kDefaultInMemoryCacheSize * 5; 78 max_size_ = kDefaultInMemoryCacheSize * 5;
74 else 79 else
75 max_size_ = static_cast<int32_t>(total_memory); 80 max_size_ = static_cast<int32_t>(total_memory);
76 81
77 return true; 82 return true;
78 } 83 }
79 84
80 bool MemBackendImpl::SetMaxSize(int max_bytes) { 85 bool MemBackendImpl::SetMaxSize(int max_bytes) {
81 static_assert(sizeof(max_bytes) == sizeof(max_size_), 86 static_assert(sizeof(max_bytes) == sizeof(max_size_),
82 "unsupported int model"); 87 "unsupported int model");
83 if (max_bytes < 0) 88 if (max_bytes < 0)
84 return false; 89 return false;
85 90
86 // Zero size means use the default. 91 // Zero size means use the default.
87 if (!max_bytes) 92 if (!max_bytes)
88 return true; 93 return true;
89 94
90 max_size_ = max_bytes; 95 max_size_ = max_bytes;
91 return true; 96 return true;
92 } 97 }
93 98
94 void MemBackendImpl::InternalDoomEntry(MemEntryImpl* entry) {
95 // Only parent entries can be passed into this method.
96 DCHECK(entry->type() == MemEntryImpl::kParentEntry);
97
98 rankings_.Remove(entry);
99 EntryMap::iterator it = entries_.find(entry->GetKey());
100 if (it != entries_.end())
101 entries_.erase(it);
102 else
103 NOTREACHED();
104
105 entry->InternalDoom();
106 }
107
// Moves |node| to the most-recently-used position in the ranking list.
void MemBackendImpl::UpdateRank(MemEntryImpl* node) {
  rankings_.UpdateRank(node);
}
111
112 void MemBackendImpl::ModifyStorageSize(int32_t old_size, int32_t new_size) {
113 if (old_size >= new_size)
114 SubstractStorageSize(old_size - new_size);
115 else
116 AddStorageSize(new_size - old_size);
117 }
118
// A single entry may use at most one eighth of the total cache budget.
int MemBackendImpl::MaxFileSize() const {
  return max_size_ / 8;
}
122 102
// A newly created entry is the most recently used one: append it to the
// tail of the LRU list (the head is the least recently used entry).
void MemBackendImpl::OnEntryInserted(MemEntryImpl* entry) {
  lru_list_.Append(entry);
}
126 106
// Marks |entry| as most recently used by moving it to the LRU list tail.
void MemBackendImpl::OnEntryUpdated(MemEntryImpl* entry) {
  DCHECK(CheckLRUListOrder(lru_list_));
  // LinkedList<>::RemoveFromList() removes |entry| from |lru_list_|.
  entry->RemoveFromList();
  lru_list_.Append(entry);
}
113
// Detaches a doomed |entry| from the backend: parent entries are removed
// from the key index, and every entry leaves the LRU list.
void MemBackendImpl::OnEntryDoomed(MemEntryImpl* entry) {
  DCHECK(CheckLRUListOrder(lru_list_));
  if (entry->type() == MemEntryImpl::PARENT_ENTRY)
    entries_.erase(entry->key());
  // LinkedList<>::RemoveFromList() removes |entry| from |lru_list_|.
  entry->RemoveFromList();
}
121
// Applies a (possibly negative) size |delta| to the accounted storage;
// growth may require evicting least-recently-used entries.
void MemBackendImpl::ModifyStorageSize(int32_t delta) {
  current_size_ += delta;
  if (delta > 0)
    EvictIfNeeded();
}
130 127
// This backend always identifies itself as an in-memory cache.
net::CacheType MemBackendImpl::GetCacheType() const {
  return net::MEMORY_CACHE;
}
134 131
// Number of entries currently stored in the key index.
int32_t MemBackendImpl::GetEntryCount() const {
  return static_cast<int32_t>(entries_.size());
}
138 135
139 int MemBackendImpl::OpenEntry(const std::string& key, Entry** entry, 136 int MemBackendImpl::OpenEntry(const std::string& key,
137 Entry** entry,
140 const CompletionCallback& callback) { 138 const CompletionCallback& callback) {
141 if (OpenEntry(key, entry)) 139 EntryMap::iterator it = entries_.find(key);
142 return net::OK; 140 if (it == entries_.end())
141 return net::ERR_FAILED;
143 142
144 return net::ERR_FAILED; 143 it->second->Open();
144
145 *entry = it->second;
146 return net::OK;
145 } 147 }
146 148
147 int MemBackendImpl::CreateEntry(const std::string& key, Entry** entry, 149 int MemBackendImpl::CreateEntry(const std::string& key,
150 Entry** entry,
148 const CompletionCallback& callback) { 151 const CompletionCallback& callback) {
149 if (CreateEntry(key, entry)) 152 std::pair<EntryMap::iterator, bool> create_result =
150 return net::OK; 153 entries_.insert(EntryMap::value_type(key, nullptr));
154 const bool did_insert = create_result.second;
155 if (!did_insert)
156 return net::ERR_FAILED;
151 157
152 return net::ERR_FAILED; 158 MemEntryImpl* cache_entry = new MemEntryImpl(this, key, net_log_);
159 create_result.first->second = cache_entry;
160 *entry = cache_entry;
161 return net::OK;
153 } 162 }
154 163
// Dooms the entry stored under |key|, if any. Completes synchronously, so
// |callback| is never invoked.
int MemBackendImpl::DoomEntry(const std::string& key,
                              const CompletionCallback& callback) {
  EntryMap::iterator it = entries_.find(key);
  if (it == entries_.end())
    return net::ERR_FAILED;

  it->second->Doom();
  return net::OK;
}
162 173
// Dooming the null time range (DoomEntriesBetween converts a null end time
// to Time::Max()) clears the entire cache.
int MemBackendImpl::DoomAllEntries(const CompletionCallback& callback) {
  return DoomEntriesBetween(Time(), Time(), callback);
}
169 177
// Dooms every entry whose last-use time falls in [initial_time, end_time).
// A null |end_time| means "no upper bound". Completes synchronously.
int MemBackendImpl::DoomEntriesBetween(Time initial_time,
                                       Time end_time,
                                       const CompletionCallback& callback) {
  if (end_time.is_null())
    end_time = Time::Max();

  DCHECK_GE(end_time, initial_time);

  // |lru_list_| is ordered by last-use time (head is oldest), so skip ahead
  // to the first entry inside the range, then doom until |end_time|.
  base::LinkNode<MemEntryImpl>* node = lru_list_.head();
  while (node != lru_list_.end() && node->value()->GetLastUsed() < initial_time)
    node = node->next();
  while (node != lru_list_.end() && node->value()->GetLastUsed() < end_time) {
    MemEntryImpl* to_doom = node->value();
    // Advance before dooming: Doom() unlinks the node from the list.
    // NOTE(review): presumably Doom() never invalidates the *following*
    // node (e.g. a child entry of |to_doom|) — confirm against
    // MemEntryImpl's child handling.
    node = node->next();
    to_doom->Doom();
  }

  return net::OK;
}
178 197
// Dooms every entry last used at or after |initial_time|.
int MemBackendImpl::DoomEntriesSince(Time initial_time,
                                     const CompletionCallback& callback) {
  return DoomEntriesBetween(initial_time, Time::Max(), callback);
}
186 202
// The backend tracks its total size continuously, so this is O(1) and
// completes synchronously (|callback| is never invoked).
int MemBackendImpl::CalculateSizeOfAllEntries(
    const CompletionCallback& callback) {
  return current_size_;
}
191 207
// Iterates over parent entries from most to least recently used. Holds only
// a WeakPtr to the backend, so iteration fails cleanly if the backend is
// destroyed mid-iteration.
class MemBackendImpl::MemIterator final : public Backend::Iterator {
 public:
  explicit MemIterator(base::WeakPtr<MemBackendImpl> backend)
      : backend_(backend), current_(nullptr) {}

  int OpenNextEntry(Entry** next_entry,
                    const CompletionCallback& callback) override {
    if (!backend_)
      return net::ERR_FAILED;

    // Iterate using |lru_list_|, from most recently used to least recently
    // used, for compatibility with the unit tests that assume this behaviour.
    // Consider the last element if we are beginning an iteration, otherwise
    // progressively move earlier in the LRU list.
    current_ = current_ ? current_->previous() : backend_->lru_list_.tail();

    // We should never return a child entry so iterate until we hit a parent
    // entry.
    while (current_ != backend_->lru_list_.end() &&
           current_->value()->type() != MemEntryImpl::PARENT_ENTRY) {
      current_ = current_->previous();
    }
    if (current_ == backend_->lru_list_.end()) {
      *next_entry = nullptr;
      return net::ERR_FAILED;
    }

    // Open() adds a reference the caller must release with Close().
    current_->value()->Open();
    *next_entry = current_->value();
    return net::OK;
  }

 private:
  base::WeakPtr<MemBackendImpl> backend_;
  // Position of the entry returned by the previous call; null before the
  // first OpenNextEntry() call.
  base::LinkNode<MemEntryImpl>* current_;
};
222 244
// Returns a new iterator bound to this backend through a WeakPtr.
scoped_ptr<Backend::Iterator> MemBackendImpl::CreateIterator() {
  return scoped_ptr<Backend::Iterator>(
      new MemIterator(weak_factory_.GetWeakPtr()));
}
227 249
// Called when a higher layer served |key| without going through this
// backend: refresh the entry's LRU position without marking it modified.
void MemBackendImpl::OnExternalCacheHit(const std::string& key) {
  EntryMap::iterator it = entries_.find(key);
  if (it != entries_.end())
    it->second->UpdateStateOnUse(MemEntryImpl::ENTRY_WAS_NOT_MODIFIED);
}
256 void MemBackendImpl::EvictIfNeeded() {
257 if (current_size_ <= max_size_)
258 return;
259
260 int target_size = std::max(0, max_size_ - kDefaultEvictionSize);
261
262 base::LinkNode<MemEntryImpl>* entry = lru_list_.head();
263 while (current_size_ > target_size && entry != lru_list_.end()) {
264 MemEntryImpl* to_doom = entry->value();
265 entry = entry->next();
266 if (!to_doom->InUse())
267 to_doom->Doom();
232 } 268 }
233 } 269 }
234 270
235 bool MemBackendImpl::OpenEntry(const std::string& key, Entry** entry) {
236 EntryMap::iterator it = entries_.find(key);
237 if (it == entries_.end())
238 return false;
239
240 it->second->Open();
241
242 *entry = it->second;
243 return true;
244 }
245
246 bool MemBackendImpl::CreateEntry(const std::string& key, Entry** entry) {
247 EntryMap::iterator it = entries_.find(key);
248 if (it != entries_.end())
249 return false;
250
251 MemEntryImpl* cache_entry = new MemEntryImpl(this);
252 if (!cache_entry->CreateEntry(key, net_log_)) {
253 delete entry;
254 return false;
255 }
256
257 rankings_.Insert(cache_entry);
258 entries_[key] = cache_entry;
259
260 *entry = cache_entry;
261 return true;
262 }
263
264 bool MemBackendImpl::DoomEntry(const std::string& key) {
265 Entry* entry;
266 if (!OpenEntry(key, &entry))
267 return false;
268
269 entry->Doom();
270 entry->Close();
271 return true;
272 }
273
// Empties the cache by trimming with no size floor. Always succeeds.
bool MemBackendImpl::DoomAllEntries() {
  TrimCache(true);
  return true;
}
278
// Dooms entries whose last-use time falls in [initial_time, end_time).
// A null |end_time| is treated as "no upper bound". Always succeeds.
bool MemBackendImpl::DoomEntriesBetween(const Time initial_time,
                                        const Time end_time) {
  if (end_time.is_null())
    return DoomEntriesSince(initial_time);

  DCHECK(end_time >= initial_time);

  MemEntryImpl* node = rankings_.GetNext(NULL);
  // Last valid entry before |node|. Note that entries after |node| may
  // become invalid while |node| is doomed, when they are child entries of
  // it. A parent node is guaranteed to precede its children in the ranking
  // list (see InternalReadSparseData and InternalWriteSparseData), so
  // resuming iteration from |last_valid| is safe.
  MemEntryImpl* last_valid = NULL;

  // |rankings_| is ordered by last-use time (most recent first): descend
  // through the cache dooming items used before |end_time|, and stop once
  // an item used before |initial_time| is reached.
  while (node) {
    if (node->GetLastUsed() < initial_time)
      break;

    if (node->GetLastUsed() < end_time)
      node->Doom();
    else
      last_valid = node;
    node = rankings_.GetNext(last_valid);
  }

  return true;
}
310
311 bool MemBackendImpl::DoomEntriesSince(const Time initial_time) {
312 for (;;) {
313 // Get the entry in the front.
314 Entry* entry = rankings_.GetNext(NULL);
315
316 // Break the loop when there are no more entries or the entry is too old.
317 if (!entry || entry->GetLastUsed() < initial_time)
318 return true;
319 entry->Doom();
320 }
321 }
322
323 void MemBackendImpl::TrimCache(bool empty) {
324 MemEntryImpl* next = rankings_.GetPrev(NULL);
325 if (!next)
326 return;
327
328 int target_size = empty ? 0 : LowWaterAdjust(max_size_);
329 while (current_size_ > target_size && next) {
330 MemEntryImpl* node = next;
331 next = rankings_.GetPrev(next);
332 if (!node->InUse() || empty) {
333 node->Doom();
334 }
335 }
336
337 return;
338 }
339
// Accounts for |bytes| of growth and trims when the budget is exceeded.
void MemBackendImpl::AddStorageSize(int32_t bytes) {
  current_size_ += bytes;
  DCHECK_GE(current_size_, 0);

  if (current_size_ > max_size_)
    TrimCache(false);
}
347
// Accounts for |bytes| of shrinkage; the total must never go negative.
void MemBackendImpl::SubstractStorageSize(int32_t bytes) {
  current_size_ -= bytes;
  DCHECK_GE(current_size_, 0);
}
352
353 } // namespace disk_cache 271 } // namespace disk_cache
OLDNEW

Powered by Google App Engine
This is Rietveld 408576698