// Copyright (c) 2012 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#include "net/disk_cache/memory/mem_backend_impl.h"

#include "base/logging.h"
#include "base/sys_info.h"
#include "net/base/net_errors.h"
#include "net/disk_cache/cache_util.h"
#include "net/disk_cache/memory/mem_entry_impl.h"

using base::Time;

namespace {

const int kDefaultInMemoryCacheSize = 10 * 1024 * 1024;
const int kCleanUpMargin = 1024 * 1024;

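// Returns the size the cache should shrink to when trimming. Keeping the
// target kCleanUpMargin below the high-water mark lets evictions happen in
// batches instead of on every insertion.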
int LowWaterAdjust(int high_water) {
  if (high_water < kCleanUpMargin)
    return 0;

  return high_water - kCleanUpMargin;
}

}  // namespace

namespace disk_cache {

MemBackendImpl::MemBackendImpl(net::NetLog* net_log)
    : max_size_(0), current_size_(0), net_log_(net_log), weak_factory_(this) {
}

MemBackendImpl::~MemBackendImpl() {
  EntryMap::iterator it = entries_.begin();
  while (it != entries_.end()) {
    it->second->Doom();
    it = entries_.begin();
  }
  DCHECK(!current_size_);
}

// Static.
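// Creates a fully initialized in-memory backend; returns nullptr if
// initialization fails. Passing 0 for |max_bytes| lets Init() pick a size
// based on available physical memory. A minimal caller sketch, assuming a
// net::NetLog* |net_log| is available:
//
//   scoped_ptr<disk_cache::Backend> cache =
//       disk_cache::MemBackendImpl::CreateBackend(0, net_log);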
scoped_ptr<Backend> MemBackendImpl::CreateBackend(int max_bytes,
                                                  net::NetLog* net_log) {
  scoped_ptr<MemBackendImpl> cache(new MemBackendImpl(net_log));
  cache->SetMaxSize(max_bytes);
  if (cache->Init())
    return cache.Pass();

  LOG(ERROR) << "Unable to create cache";
  return nullptr;
}

bool MemBackendImpl::Init() {
  if (max_size_)
    return true;

  int64 total_memory = base::SysInfo::AmountOfPhysicalMemory();

  if (total_memory <= 0) {
    max_size_ = kDefaultInMemoryCacheSize;
    return true;
  }

  // We want to use up to 2% of the computer's memory, with a limit of 50 MB,
  // reached on systems with more than 2.5 GB of RAM.
  total_memory = total_memory * 2 / 100;
  if (total_memory > kDefaultInMemoryCacheSize * 5)
    max_size_ = kDefaultInMemoryCacheSize * 5;
  else
    max_size_ = static_cast<int32>(total_memory);

  return true;
}

bool MemBackendImpl::SetMaxSize(int max_bytes) {
  static_assert(sizeof(max_bytes) == sizeof(max_size_),
                "unsupported int model");
  if (max_bytes < 0)
    return false;

  // Zero size means use the default.
  if (!max_bytes)
    return true;

  max_size_ = max_bytes;
  return true;
}

void MemBackendImpl::InternalDoomEntry(MemEntryImpl* entry) {
  // Only parent entries can be passed into this method.
  DCHECK(entry->type() == MemEntryImpl::kParentEntry);

  rankings_.Remove(entry);
  EntryMap::iterator it = entries_.find(entry->GetKey());
  if (it != entries_.end())
    entries_.erase(it);
  else
    NOTREACHED();

  entry->InternalDoom();
}

void MemBackendImpl::UpdateRank(MemEntryImpl* node) {
  rankings_.UpdateRank(node);
}

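// Reconciles |current_size_| with an entry's size change; growth may trigger
// an eviction pass via AddStorageSize().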
void MemBackendImpl::ModifyStorageSize(int32 old_size, int32 new_size) {
  if (old_size >= new_size)
    SubstractStorageSize(old_size - new_size);
  else
    AddStorageSize(new_size - old_size);
}

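// A single entry may use at most 1/8 of the total cache size.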
int MemBackendImpl::MaxFileSize() const {
  return max_size_ / 8;
}

void MemBackendImpl::InsertIntoRankingList(MemEntryImpl* entry) {
  rankings_.Insert(entry);
}

void MemBackendImpl::RemoveFromRankingList(MemEntryImpl* entry) {
  rankings_.Remove(entry);
}

net::CacheType MemBackendImpl::GetCacheType() const {
  return net::MEMORY_CACHE;
}

int32 MemBackendImpl::GetEntryCount() const {
  return static_cast<int32>(entries_.size());
}

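// The asynchronous Backend interface is implemented synchronously here: each
// call below completes immediately and returns its result directly, so the
// |callback| arguments are never invoked.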
int MemBackendImpl::OpenEntry(const std::string& key, Entry** entry,
                              const CompletionCallback& callback) {
  if (OpenEntry(key, entry))
    return net::OK;

  return net::ERR_FAILED;
}

int MemBackendImpl::CreateEntry(const std::string& key, Entry** entry,
                                const CompletionCallback& callback) {
  if (CreateEntry(key, entry))
    return net::OK;

  return net::ERR_FAILED;
}

int MemBackendImpl::DoomEntry(const std::string& key,
                              const CompletionCallback& callback) {
  if (DoomEntry(key))
    return net::OK;

  return net::ERR_FAILED;
}

int MemBackendImpl::DoomAllEntries(const CompletionCallback& callback) {
  if (DoomAllEntries())
    return net::OK;

  return net::ERR_FAILED;
}

int MemBackendImpl::DoomEntriesBetween(const base::Time initial_time,
                                       const base::Time end_time,
                                       const CompletionCallback& callback) {
  if (DoomEntriesBetween(initial_time, end_time))
    return net::OK;

  return net::ERR_FAILED;
}

int MemBackendImpl::DoomEntriesSince(const base::Time initial_time,
                                     const CompletionCallback& callback) {
  if (DoomEntriesSince(initial_time))
    return net::OK;

  return net::ERR_FAILED;
}

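// Iterates the ranking list, returning only parent entries. Holds a WeakPtr
// to the backend so iteration fails cleanly with ERR_FAILED if the backend
// has already been destroyed.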
class MemBackendImpl::MemIterator : public Backend::Iterator {
 public:
  explicit MemIterator(base::WeakPtr<MemBackendImpl> backend)
      : backend_(backend), current_(NULL) {
  }

  int OpenNextEntry(Entry** next_entry,
                    const CompletionCallback& callback) override {
    if (!backend_)
      return net::ERR_FAILED;

    MemEntryImpl* node = backend_->rankings_.GetNext(current_);
    // We should never return a child entry, so iterate until we hit a parent
    // entry.
    while (node && node->type() != MemEntryImpl::kParentEntry)
      node = backend_->rankings_.GetNext(node);
    *next_entry = node;
    current_ = node;

    if (node) {
      node->Open();
      return net::OK;
    }
    return net::ERR_FAILED;
  }

 private:
  base::WeakPtr<MemBackendImpl> backend_;
  MemEntryImpl* current_;
};

scoped_ptr<Backend::Iterator> MemBackendImpl::CreateIterator() {
  return scoped_ptr<Backend::Iterator>(
      new MemIterator(weak_factory_.GetWeakPtr()));
}

void MemBackendImpl::OnExternalCacheHit(const std::string& key) {
  EntryMap::iterator it = entries_.find(key);
  if (it != entries_.end()) {
    UpdateRank(it->second);
  }
}

bool MemBackendImpl::OpenEntry(const std::string& key, Entry** entry) {
  EntryMap::iterator it = entries_.find(key);
  if (it == entries_.end())
    return false;

  it->second->Open();

  *entry = it->second;
  return true;
}

bool MemBackendImpl::CreateEntry(const std::string& key, Entry** entry) {
  EntryMap::iterator it = entries_.find(key);
  if (it != entries_.end())
    return false;

  MemEntryImpl* cache_entry = new MemEntryImpl(this);
  if (!cache_entry->CreateEntry(key, net_log_)) {
    delete cache_entry;
    return false;
  }

  rankings_.Insert(cache_entry);
  entries_[key] = cache_entry;

  *entry = cache_entry;
  return true;
}

bool MemBackendImpl::DoomEntry(const std::string& key) {
  Entry* entry;
  if (!OpenEntry(key, &entry))
    return false;

  entry->Doom();
  entry->Close();
  return true;
}

bool MemBackendImpl::DoomAllEntries() {
  TrimCache(true);
  return true;
}

bool MemBackendImpl::DoomEntriesBetween(const Time initial_time,
                                        const Time end_time) {
  if (end_time.is_null())
    return DoomEntriesSince(initial_time);

  DCHECK(end_time >= initial_time);

  MemEntryImpl* node = rankings_.GetNext(NULL);
  // Last valid entry before |node|.
  // Note that entries after |node| may become invalid while |node| is being
  // doomed, when they are its child entries. It is guaranteed that a parent
  // node appears before its children in the ranking list (see
  // InternalReadSparseData and InternalWriteSparseData).
  MemEntryImpl* last_valid = NULL;

  // |rankings_| is ordered by last-used time; this descends through the
  // cache, dooming items last used before |end_time|, and stops once it
  // reaches an item last used before |initial_time|.
  while (node) {
    if (node->GetLastUsed() < initial_time)
      break;

    if (node->GetLastUsed() < end_time)
      node->Doom();
    else
      last_valid = node;
    node = rankings_.GetNext(last_valid);
  }

  return true;
}

bool MemBackendImpl::DoomEntriesSince(const Time initial_time) {
  for (;;) {
    // Get the entry in the front.
    Entry* entry = rankings_.GetNext(NULL);

    // Break the loop when there are no more entries or the entry is too old.
    if (!entry || entry->GetLastUsed() < initial_time)
      return true;
    entry->Doom();
  }
}

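// Walks the ranking list from its least recently used end, dooming entries
// until |current_size_| drops to the target: 0 when |empty| is true,
// otherwise LowWaterAdjust(max_size_). Entries currently in use are skipped
// unless |empty| is true.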
void MemBackendImpl::TrimCache(bool empty) {
  MemEntryImpl* next = rankings_.GetPrev(NULL);
  if (!next)
    return;

  int target_size = empty ? 0 : LowWaterAdjust(max_size_);
  while (current_size_ > target_size && next) {
    MemEntryImpl* node = next;
    next = rankings_.GetPrev(next);
    if (!node->InUse() || empty) {
      node->Doom();
    }
  }

  return;
}

void MemBackendImpl::AddStorageSize(int32 bytes) {
  current_size_ += bytes;
  DCHECK_GE(current_size_, 0);

  if (current_size_ > max_size_)
    TrimCache(false);
}

void MemBackendImpl::SubstractStorageSize(int32 bytes) {
  current_size_ -= bytes;
  DCHECK_GE(current_size_, 0);
}

}  // namespace disk_cache