// Copyright (c) 2012 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#include "net/disk_cache/mem_backend_impl.h"

#include "base/logging.h"
#include "base/sys_info.h"
#include "net/base/net_errors.h"
#include "net/disk_cache/cache_util.h"
#include "net/disk_cache/mem_entry_impl.h"

using base::Time;

namespace {

// Default maximum size of the in-memory cache (10 MB).
const int kDefaultInMemoryCacheSize = 10 * 1024 * 1024;

// Extra slack freed below the high-water mark whenever the cache is trimmed.
const int kCleanUpMargin = 1024 * 1024;

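// Returns the size the cache should shrink to during a trim: the high-water
// mark minus the clean-up margin, clamped at zero.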
int LowWaterAdjust(int high_water) {
  if (high_water < kCleanUpMargin)
    return 0;

  return high_water - kCleanUpMargin;
}

}  // namespace

namespace disk_cache {

MemBackendImpl::MemBackendImpl(net::NetLog* net_log)
    : max_size_(0), current_size_(0), net_log_(net_log) {}

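// Dooming an entry removes it from |entries_|, so the loop below restarts
// from begin() after every Doom() instead of advancing a (now invalidated)
// iterator.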
MemBackendImpl::~MemBackendImpl() {
  EntryMap::iterator it = entries_.begin();
  while (it != entries_.end()) {
    it->second->Doom();
    it = entries_.begin();
  }
  DCHECK(!current_size_);
}

// Static.
scoped_ptr<Backend> MemBackendImpl::CreateBackend(int max_bytes,
                                                  net::NetLog* net_log) {
  scoped_ptr<MemBackendImpl> cache(new MemBackendImpl(net_log));
  cache->SetMaxSize(max_bytes);
  if (cache->Init())
    return cache.PassAs<Backend>();

  LOG(ERROR) << "Unable to create cache";
  return scoped_ptr<Backend>();
}

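// Picks a backend size when the caller did not force one: 2% of physical
// memory, capped at five times the 10 MB default (50 MB).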
bool MemBackendImpl::Init() {
  if (max_size_)
    return true;

  int64 total_memory = base::SysInfo::AmountOfPhysicalMemory();

  if (total_memory <= 0) {
    max_size_ = kDefaultInMemoryCacheSize;
    return true;
  }

  // We want to use up to 2% of the computer's memory, with a limit of 50 MB,
  // reached on systems with more than 2.5 GB of RAM.
  total_memory = total_memory * 2 / 100;
  if (total_memory > kDefaultInMemoryCacheSize * 5)
    max_size_ = kDefaultInMemoryCacheSize * 5;
  else
    max_size_ = static_cast<int32>(total_memory);

  return true;
}

bool MemBackendImpl::SetMaxSize(int max_bytes) {
  COMPILE_ASSERT(sizeof(max_bytes) == sizeof(max_size_), unsupported_int_model);
  if (max_bytes < 0)
    return false;

  // Zero size means use the default.
  if (!max_bytes)
    return true;

  max_size_ = max_bytes;
  return true;
}

void MemBackendImpl::InternalDoomEntry(MemEntryImpl* entry) {
  // Only parent entries can be passed into this method.
  DCHECK(entry->type() == MemEntryImpl::kParentEntry);

  rankings_.Remove(entry);
  EntryMap::iterator it = entries_.find(entry->GetKey());
  if (it != entries_.end())
    entries_.erase(it);
  else
    NOTREACHED();

  entry->InternalDoom();
}

void MemBackendImpl::UpdateRank(MemEntryImpl* node) {
  rankings_.UpdateRank(node);
}

void MemBackendImpl::ModifyStorageSize(int32 old_size, int32 new_size) {
  if (old_size >= new_size)
    SubstractStorageSize(old_size - new_size);
  else
    AddStorageSize(new_size - old_size);
}

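// A single entry may use at most one eighth of the total cache size.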
int MemBackendImpl::MaxFileSize() const {
  return max_size_ / 8;
}

void MemBackendImpl::InsertIntoRankingList(MemEntryImpl* entry) {
  rankings_.Insert(entry);
}

void MemBackendImpl::RemoveFromRankingList(MemEntryImpl* entry) {
  rankings_.Remove(entry);
}

net::CacheType MemBackendImpl::GetCacheType() const {
  return net::MEMORY_CACHE;
}

int32 MemBackendImpl::GetEntryCount() const {
  return static_cast<int32>(entries_.size());
}

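// The asynchronous Backend interface methods below complete synchronously in
// the in-memory backend, so |callback| is never used; the result is returned
// directly as net::OK or net::ERR_FAILED.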
int MemBackendImpl::OpenEntry(const std::string& key, Entry** entry,
                              const CompletionCallback& callback) {
  if (OpenEntry(key, entry))
    return net::OK;

  return net::ERR_FAILED;
}

int MemBackendImpl::CreateEntry(const std::string& key, Entry** entry,
                                const CompletionCallback& callback) {
  if (CreateEntry(key, entry))
    return net::OK;

  return net::ERR_FAILED;
}

int MemBackendImpl::DoomEntry(const std::string& key,
                              const CompletionCallback& callback) {
  if (DoomEntry(key))
    return net::OK;

  return net::ERR_FAILED;
}

int MemBackendImpl::DoomAllEntries(const CompletionCallback& callback) {
  if (DoomAllEntries())
    return net::OK;

  return net::ERR_FAILED;
}

int MemBackendImpl::DoomEntriesBetween(const base::Time initial_time,
                                       const base::Time end_time,
                                       const CompletionCallback& callback) {
  if (DoomEntriesBetween(initial_time, end_time))
    return net::OK;

  return net::ERR_FAILED;
}

int MemBackendImpl::DoomEntriesSince(const base::Time initial_time,
                                     const CompletionCallback& callback) {
  if (DoomEntriesSince(initial_time))
    return net::OK;

  return net::ERR_FAILED;
}

int MemBackendImpl::OpenNextEntry(void** iter, Entry** next_entry,
                                  const CompletionCallback& callback) {
  if (OpenNextEntry(iter, next_entry))
    return net::OK;

  return net::ERR_FAILED;
}

void MemBackendImpl::EndEnumeration(void** iter) {
  *iter = NULL;
}

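// An external cache hit counts as a use, so move the entry to the front of
// the LRU list.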
void MemBackendImpl::OnExternalCacheHit(const std::string& key) {
  EntryMap::iterator it = entries_.find(key);
  if (it != entries_.end()) {
    UpdateRank(it->second);
  }
}

bool MemBackendImpl::OpenEntry(const std::string& key, Entry** entry) {
  EntryMap::iterator it = entries_.find(key);
  if (it == entries_.end())
    return false;

  it->second->Open();

  *entry = it->second;
  return true;
}

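// Fails if an entry with |key| already exists; otherwise creates a new parent
// entry, inserts it into the ranking list and the key map, and hands it back
// through |entry|.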
bool MemBackendImpl::CreateEntry(const std::string& key, Entry** entry) {
  EntryMap::iterator it = entries_.find(key);
  if (it != entries_.end())
    return false;

  MemEntryImpl* cache_entry = new MemEntryImpl(this);
  if (!cache_entry->CreateEntry(key, net_log_)) {
    // |cache_entry| was never registered anywhere, so delete it here.
    delete cache_entry;
    return false;
  }

  rankings_.Insert(cache_entry);
  entries_[key] = cache_entry;

  *entry = cache_entry;
  return true;
}

bool MemBackendImpl::DoomEntry(const std::string& key) {
  Entry* entry;
  if (!OpenEntry(key, &entry))
    return false;

  entry->Doom();
  entry->Close();
  return true;
}

bool MemBackendImpl::DoomAllEntries() {
  TrimCache(true);
  return true;
}

bool MemBackendImpl::DoomEntriesBetween(const Time initial_time,
                                        const Time end_time) {
  if (end_time.is_null())
    return DoomEntriesSince(initial_time);

  DCHECK(end_time >= initial_time);

  MemEntryImpl* node = rankings_.GetNext(NULL);
  // Last valid entry before |node|. Entries after |node| may become invalid
  // while |node| is doomed if they are its child entries; a parent entry is
  // guaranteed to precede its children in the ranking list (see
  // InternalReadSparseData and InternalWriteSparseData).
  MemEntryImpl* last_valid = NULL;

  // |rankings_| is ordered by last-used time, so this descends through the
  // cache dooming entries last used before |end_time| and stops once it
  // reaches an entry last used before |initial_time|.
  while (node) {
    if (node->GetLastUsed() < initial_time)
      break;

    if (node->GetLastUsed() < end_time)
      node->Doom();
    else
      last_valid = node;
    node = rankings_.GetNext(last_valid);
  }

  return true;
}

bool MemBackendImpl::DoomEntriesSince(const Time initial_time) {
  for (;;) {
    // Get the entry in the front.
    Entry* entry = rankings_.GetNext(NULL);

    // Break the loop when there are no more entries or the entry is too old.
    if (!entry || entry->GetLastUsed() < initial_time)
      return true;
    entry->Doom();
  }
}

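// |*iter| holds the entry returned by the previous call (NULL to start), so
// enumeration proceeds through |rankings_| from most to least recently used.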
bool MemBackendImpl::OpenNextEntry(void** iter, Entry** next_entry) {
  MemEntryImpl* current = reinterpret_cast<MemEntryImpl*>(*iter);
  MemEntryImpl* node = rankings_.GetNext(current);
  // We should never return a child entry, so iterate until we hit a parent
  // entry.
  while (node && node->type() != MemEntryImpl::kParentEntry) {
    node = rankings_.GetNext(node);
  }
  *next_entry = node;
  *iter = node;

  if (node)
    node->Open();

  return NULL != node;
}

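// Walks the ranking list from the least recently used end, dooming entries
// until |current_size_| drops to the target: zero when |empty| is true,
// otherwise the low-water mark derived from |max_size_|. Entries currently
// in use are only doomed when |empty| is true.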
void MemBackendImpl::TrimCache(bool empty) {
  MemEntryImpl* next = rankings_.GetPrev(NULL);
  if (!next)
    return;

  int target_size = empty ? 0 : LowWaterAdjust(max_size_);
  while (current_size_ > target_size && next) {
    MemEntryImpl* node = next;
    next = rankings_.GetPrev(next);
    if (!node->InUse() || empty) {
      node->Doom();
    }
  }
}

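// Grows the running byte count and trims the cache if the new total exceeds
// the budget.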
void MemBackendImpl::AddStorageSize(int32 bytes) {
  current_size_ += bytes;
  DCHECK_GE(current_size_, 0);

  if (current_size_ > max_size_)
    TrimCache(false);
}

void MemBackendImpl::SubstractStorageSize(int32 bytes) {
  current_size_ -= bytes;
  DCHECK_GE(current_size_, 0);
}

}  // namespace disk_cache