| OLD | NEW |
| (Empty) |
| 1 // Copyright (c) 2013 The Chromium Authors. All rights reserved. | |
| 2 // Use of this source code is governed by a BSD-style license that can be | |
| 3 // found in the LICENSE file. | |
| 4 | |
| 5 #include "net/disk_cache/simple/simple_backend_impl.h" | |
| 6 | |
| 7 #include <algorithm> | |
| 8 #include <cstdlib> | |
| 9 #include <functional> | |
| 10 | |
| 11 #if defined(OS_POSIX) | |
| 12 #include <sys/resource.h> | |
| 13 #endif | |
| 14 | |
| 15 #include "base/bind.h" | |
| 16 #include "base/callback.h" | |
| 17 #include "base/files/file_util.h" | |
| 18 #include "base/location.h" | |
| 19 #include "base/metrics/field_trial.h" | |
| 20 #include "base/metrics/histogram.h" | |
| 21 #include "base/metrics/sparse_histogram.h" | |
| 22 #include "base/single_thread_task_runner.h" | |
| 23 #include "base/sys_info.h" | |
| 24 #include "base/task_runner_util.h" | |
| 25 #include "base/thread_task_runner_handle.h" | |
| 26 #include "base/threading/sequenced_worker_pool.h" | |
| 27 #include "base/time/time.h" | |
| 28 #include "net/base/net_errors.h" | |
| 29 #include "net/disk_cache/cache_util.h" | |
| 30 #include "net/disk_cache/simple/simple_entry_format.h" | |
| 31 #include "net/disk_cache/simple/simple_entry_impl.h" | |
| 32 #include "net/disk_cache/simple/simple_histogram_macros.h" | |
| 33 #include "net/disk_cache/simple/simple_index.h" | |
| 34 #include "net/disk_cache/simple/simple_index_file.h" | |
| 35 #include "net/disk_cache/simple/simple_synchronous_entry.h" | |
| 36 #include "net/disk_cache/simple/simple_util.h" | |
| 37 #include "net/disk_cache/simple/simple_version_upgrade.h" | |
| 38 | |
| 39 using base::Callback; | |
| 40 using base::Closure; | |
| 41 using base::FilePath; | |
| 42 using base::SequencedWorkerPool; | |
| 43 using base::Time; | |
| 44 using base::DirectoryExists; | |
| 45 using base::CreateDirectory; | |
| 46 | |
| 47 namespace disk_cache { | |
| 48 | |
| 49 namespace { | |
| 50 | |
// Maximum number of concurrent worker pool threads, which also is the limit
// on concurrent IO (as we use one thread per IO request).
const size_t kMaxWorkerThreads = 5U;

// Name prefix given to the worker pool threads.
const char kThreadNamePrefix[] = "SimpleCache";

// Maximum fraction of the cache that one entry can consume.
const int kMaxFileRatio = 8;
| 59 | |
| 60 // A global sequenced worker pool to use for launching all tasks. | |
| 61 SequencedWorkerPool* g_sequenced_worker_pool = NULL; | |
| 62 | |
| 63 void MaybeCreateSequencedWorkerPool() { | |
| 64 if (!g_sequenced_worker_pool) { | |
| 65 g_sequenced_worker_pool = | |
| 66 new SequencedWorkerPool(kMaxWorkerThreads, kThreadNamePrefix); | |
| 67 g_sequenced_worker_pool->AddRef(); // Leak it. | |
| 68 } | |
| 69 } | |
| 70 | |
// Ensures the file descriptor limit histograms are recorded at most once
// per process.
bool g_fd_limit_histogram_has_been_populated = false;

// Records one-time histograms of the process's file descriptor limits (on
// POSIX), which bound how many cache files can be open concurrently.
// No-op after the first call.
void MaybeHistogramFdLimit(net::CacheType cache_type) {
  if (g_fd_limit_histogram_has_been_populated)
    return;

  // Used in histograms; add new entries at end.
  enum FdLimitStatus {
    FD_LIMIT_STATUS_UNSUPPORTED = 0,
    FD_LIMIT_STATUS_FAILED = 1,
    FD_LIMIT_STATUS_SUCCEEDED = 2,
    FD_LIMIT_STATUS_MAX = 3
  };
  FdLimitStatus fd_limit_status = FD_LIMIT_STATUS_UNSUPPORTED;
  int soft_fd_limit = 0;
  int hard_fd_limit = 0;

#if defined(OS_POSIX)
  // getrlimit() returns 0 on success.
  struct rlimit nofile;
  if (!getrlimit(RLIMIT_NOFILE, &nofile)) {
    soft_fd_limit = nofile.rlim_cur;
    hard_fd_limit = nofile.rlim_max;
    fd_limit_status = FD_LIMIT_STATUS_SUCCEEDED;
  } else {
    fd_limit_status = FD_LIMIT_STATUS_FAILED;
  }
#endif

  SIMPLE_CACHE_UMA(ENUMERATION,
                   "FileDescriptorLimitStatus", cache_type,
                   fd_limit_status, FD_LIMIT_STATUS_MAX);
  if (fd_limit_status == FD_LIMIT_STATUS_SUCCEEDED) {
    SIMPLE_CACHE_UMA(SPARSE_SLOWLY,
                     "FileDescriptorLimitSoft", cache_type, soft_fd_limit);
    SIMPLE_CACHE_UMA(SPARSE_SLOWLY,
                     "FileDescriptorLimitHard", cache_type, hard_fd_limit);
  }

  g_fd_limit_histogram_has_been_populated = true;
}
| 111 | |
// Detects if the files in the cache directory match the current disk cache
// backend type and version. If the directory contains no cache, occupies it
// with the fresh structure. Returns false if the directory cannot be
// created or the on-disk structure cannot be upgraded.
bool FileStructureConsistent(const base::FilePath& path) {
  // Create the cache directory if it does not exist yet.
  if (!base::PathExists(path) && !base::CreateDirectory(path)) {
    LOG(ERROR) << "Failed to create directory: " << path.LossyDisplayName();
    return false;
  }
  // Upgrades an older on-disk format in place where possible.
  return disk_cache::UpgradeSimpleCacheOnDisk(path);
}
| 122 | |
// A context used by a BarrierCompletionCallback to track state.
struct BarrierContext {
  BarrierContext(int expected_count)
      : expected(expected_count),
        count(0),
        had_error(false) {}

  const int expected;  // Number of successful results to wait for.
  int count;           // Successful results received so far.
  bool had_error;      // Set once a failure has been forwarded.
};
| 134 | |
// Shared implementation behind MakeBarrierCompletionCallback(). Counts
// successful completions in |context|; runs |final_callback| with net::OK
// after |context->expected| successes, or with the first error received.
// Once an error has been forwarded, all subsequent results are ignored.
void BarrierCompletionCallbackImpl(
    BarrierContext* context,
    const net::CompletionCallback& final_callback,
    int result) {
  DCHECK_GT(context->expected, context->count);
  if (context->had_error)
    return;
  if (result != net::OK) {
    context->had_error = true;
    final_callback.Run(result);
    return;
  }
  ++context->count;
  if (context->count == context->expected)
    final_callback.Run(net::OK);
}
| 151 | |
// A barrier completion callback is a net::CompletionCallback that waits for
// |count| successful results before invoking |final_callback|. In the case of
// an error, the first error is passed to |final_callback| and all others
// are ignored. The BarrierContext is owned by the returned callback
// (base::Owned) and destroyed with it.
net::CompletionCallback MakeBarrierCompletionCallback(
    int count,
    const net::CompletionCallback& final_callback) {
  BarrierContext* context = new BarrierContext(count);
  return base::Bind(&BarrierCompletionCallbackImpl,
                    base::Owned(context), final_callback);
}
| 163 | |
| 164 // A short bindable thunk that ensures a completion callback is always called | |
| 165 // after running an operation asynchronously. | |
| 166 void RunOperationAndCallback( | |
| 167 const Callback<int(const net::CompletionCallback&)>& operation, | |
| 168 const net::CompletionCallback& operation_callback) { | |
| 169 const int operation_result = operation.Run(operation_callback); | |
| 170 if (operation_result != net::ERR_IO_PENDING) | |
| 171 operation_callback.Run(operation_result); | |
| 172 } | |
| 173 | |
| 174 void RecordIndexLoad(net::CacheType cache_type, | |
| 175 base::TimeTicks constructed_since, | |
| 176 int result) { | |
| 177 const base::TimeDelta creation_to_index = base::TimeTicks::Now() - | |
| 178 constructed_since; | |
| 179 if (result == net::OK) { | |
| 180 SIMPLE_CACHE_UMA(TIMES, "CreationToIndex", cache_type, creation_to_index); | |
| 181 } else { | |
| 182 SIMPLE_CACHE_UMA(TIMES, | |
| 183 "CreationToIndexFail", cache_type, creation_to_index); | |
| 184 } | |
| 185 } | |
| 186 | |
| 187 } // namespace | |
| 188 | |
// RAII token handed to each active SimpleEntryImpl. Its destruction removes
// the entry from the backend's |active_entries_| map (if the backend is
// still alive), keeping the map in sync with entry lifetime.
class SimpleBackendImpl::ActiveEntryProxy
    : public SimpleEntryImpl::ActiveEntryProxy {
 public:
  ~ActiveEntryProxy() override {
    if (backend_) {
      DCHECK_EQ(1U, backend_->active_entries_.count(entry_hash_));
      backend_->active_entries_.erase(entry_hash_);
    }
  }

  static scoped_ptr<SimpleEntryImpl::ActiveEntryProxy> Create(
      int64 entry_hash,
      SimpleBackendImpl* backend) {
    scoped_ptr<SimpleEntryImpl::ActiveEntryProxy>
        proxy(new ActiveEntryProxy(entry_hash, backend));
    return proxy.Pass();
  }

 private:
  ActiveEntryProxy(uint64 entry_hash,
                   SimpleBackendImpl* backend)
      : entry_hash_(entry_hash),
        backend_(backend->AsWeakPtr()) {}

  // Hash key of the proxied entry in |active_entries_|.
  uint64 entry_hash_;
  // Weak: the backend may be destroyed before the entry.
  base::WeakPtr<SimpleBackendImpl> backend_;
};
| 216 | |
SimpleBackendImpl::SimpleBackendImpl(
    const FilePath& path,
    int max_bytes,
    net::CacheType cache_type,
    const scoped_refptr<base::SingleThreadTaskRunner>& cache_thread,
    net::NetLog* net_log)
    : path_(path),
      cache_type_(cache_type),
      cache_thread_(cache_thread),
      orig_max_size_(max_bytes),
      // Only the HTTP disk cache uses optimistic entry operations; all
      // other cache types use the non-optimistic mode.
      entry_operations_mode_(cache_type == net::DISK_CACHE ?
                                 SimpleEntryImpl::OPTIMISTIC_OPERATIONS :
                                 SimpleEntryImpl::NON_OPTIMISTIC_OPERATIONS),
      net_log_(net_log) {
  MaybeHistogramFdLimit(cache_type_);
}
| 233 | |
SimpleBackendImpl::~SimpleBackendImpl() {
  // Persist the in-memory index so a later startup can reuse it.
  index_->WriteToDisk();
}
| 237 | |
// Kicks off asynchronous initialization: obtains a worker pool task runner,
// constructs the index, and scans/creates the on-disk cache structure on
// |cache_thread_|. Always returns net::ERR_IO_PENDING; the final result is
// delivered to |completion_callback| via InitializeIndex().
int SimpleBackendImpl::Init(const CompletionCallback& completion_callback) {
  MaybeCreateSequencedWorkerPool();

  worker_pool_ = g_sequenced_worker_pool->GetTaskRunnerWithShutdownBehavior(
      SequencedWorkerPool::CONTINUE_ON_SHUTDOWN);

  index_.reset(new SimpleIndex(
      base::ThreadTaskRunnerHandle::Get(),
      this,
      cache_type_,
      make_scoped_ptr(new SimpleIndexFile(
          cache_thread_, worker_pool_.get(), cache_type_, path_))));
  // Record how long the index takes to load, relative to now.
  index_->ExecuteWhenReady(
      base::Bind(&RecordIndexLoad, cache_type_, base::TimeTicks::Now()));

  PostTaskAndReplyWithResult(
      cache_thread_.get(),
      FROM_HERE,
      base::Bind(
          &SimpleBackendImpl::InitCacheStructureOnDisk, path_, orig_max_size_),
      base::Bind(&SimpleBackendImpl::InitializeIndex,
                 AsWeakPtr(),
                 completion_callback));
  return net::ERR_IO_PENDING;
}
| 263 | |
| 264 bool SimpleBackendImpl::SetMaxSize(int max_bytes) { | |
| 265 if (max_bytes < 0) | |
| 266 return false; | |
| 267 orig_max_size_ = max_bytes; | |
| 268 index_->SetMaxSize(max_bytes); | |
| 269 return true; | |
| 270 } | |
| 271 | |
// A single entry may consume at most 1/kMaxFileRatio of the total cache.
int SimpleBackendImpl::GetMaxFileSize() const {
  return static_cast<int>(index_->max_size() / kMaxFileRatio);
}
| 275 | |
| 276 void SimpleBackendImpl::OnDoomStart(uint64 entry_hash) { | |
| 277 DCHECK_EQ(0u, entries_pending_doom_.count(entry_hash)); | |
| 278 entries_pending_doom_.insert( | |
| 279 std::make_pair(entry_hash, std::vector<Closure>())); | |
| 280 } | |
| 281 | |
| 282 void SimpleBackendImpl::OnDoomComplete(uint64 entry_hash) { | |
| 283 DCHECK_EQ(1u, entries_pending_doom_.count(entry_hash)); | |
| 284 base::hash_map<uint64, std::vector<Closure> >::iterator it = | |
| 285 entries_pending_doom_.find(entry_hash); | |
| 286 std::vector<Closure> to_run_closures; | |
| 287 to_run_closures.swap(it->second); | |
| 288 entries_pending_doom_.erase(it); | |
| 289 | |
| 290 std::for_each(to_run_closures.begin(), to_run_closures.end(), | |
| 291 std::mem_fun_ref(&Closure::Run)); | |
| 292 } | |
| 293 | |
// Dooms every entry in |entry_hashes| (which is consumed: its contents are
// swapped out). Entries that are open or already pending doom are doomed
// individually; the rest are deleted en masse on the worker pool.
// |callback| runs once all dooms have completed, or with the first error.
void SimpleBackendImpl::DoomEntries(std::vector<uint64>* entry_hashes,
                                    const net::CompletionCallback& callback) {
  scoped_ptr<std::vector<uint64> >
      mass_doom_entry_hashes(new std::vector<uint64>());
  mass_doom_entry_hashes->swap(*entry_hashes);

  std::vector<uint64> to_doom_individually_hashes;

  // For each of the entry hashes, there are two cases:
  // 1. The entry is either open or pending doom, and so it should be doomed
  //    individually to avoid flakes.
  // 2. The entry is not in use at all, so we can call
  //    SimpleSynchronousEntry::DoomEntrySet and delete the files en masse.
  for (int i = mass_doom_entry_hashes->size() - 1; i >= 0; --i) {
    const uint64 entry_hash = (*mass_doom_entry_hashes)[i];
    DCHECK(active_entries_.count(entry_hash) == 0 ||
           entries_pending_doom_.count(entry_hash) == 0);
    if (!active_entries_.count(entry_hash) &&
        !entries_pending_doom_.count(entry_hash)) {
      continue;
    }

    to_doom_individually_hashes.push_back(entry_hash);

    // Remove this hash by overwriting it with the last element (iteration
    // is back-to-front, so order does not matter) and shrinking the vector.
    (*mass_doom_entry_hashes)[i] = mass_doom_entry_hashes->back();
    mass_doom_entry_hashes->resize(mass_doom_entry_hashes->size() - 1);
  }

  // The "+ 1" accounts for the mass-doom task posted below.
  net::CompletionCallback barrier_callback =
      MakeBarrierCompletionCallback(to_doom_individually_hashes.size() + 1,
                                    callback);
  for (std::vector<uint64>::const_iterator
           it = to_doom_individually_hashes.begin(),
           end = to_doom_individually_hashes.end(); it != end; ++it) {
    const int doom_result = DoomEntryFromHash(*it, barrier_callback);
    DCHECK_EQ(net::ERR_IO_PENDING, doom_result);
    index_->Remove(*it);
  }

  for (std::vector<uint64>::const_iterator
           it = mass_doom_entry_hashes->begin(),
           end = mass_doom_entry_hashes->end(); it != end; ++it) {
    index_->Remove(*it);
    // Queue later operations on these hashes until the mass doom finishes.
    OnDoomStart(*it);
  }

  // Taking this pointer here avoids undefined behaviour from calling
  // base::Passed before mass_doom_entry_hashes.get().
  std::vector<uint64>* mass_doom_entry_hashes_ptr =
      mass_doom_entry_hashes.get();
  PostTaskAndReplyWithResult(worker_pool_.get(),
                             FROM_HERE,
                             base::Bind(&SimpleSynchronousEntry::DoomEntrySet,
                                        mass_doom_entry_hashes_ptr,
                                        path_),
                             base::Bind(&SimpleBackendImpl::DoomEntriesComplete,
                                        AsWeakPtr(),
                                        base::Passed(&mass_doom_entry_hashes),
                                        barrier_callback));
}
| 354 | |
| 355 net::CacheType SimpleBackendImpl::GetCacheType() const { | |
| 356 return net::DISK_CACHE; | |
| 357 } | |
| 358 | |
// Returns the number of entries according to the index.
int32 SimpleBackendImpl::GetEntryCount() const {
  // TODO(pasko): Use directory file count when index is not ready.
  return index_->GetEntryCount();
}
| 363 | |
// Opens the entry for |key|. If the entry's hash has a doom in flight, the
// open is queued until the doom completes. The open result is routed
// through OnEntryOpenedFromKey(), which verifies the key (hash collisions
// are possible) before running |callback|.
int SimpleBackendImpl::OpenEntry(const std::string& key,
                                 Entry** entry,
                                 const CompletionCallback& callback) {
  const uint64 entry_hash = simple_util::GetEntryHashKey(key);

  // TODO(gavinp): Factor out this (not quite completely) repetitive code
  // block from OpenEntry/CreateEntry/DoomEntry.
  base::hash_map<uint64, std::vector<Closure> >::iterator it =
      entries_pending_doom_.find(entry_hash);
  if (it != entries_pending_doom_.end()) {
    Callback<int(const net::CompletionCallback&)> operation =
        base::Bind(&SimpleBackendImpl::OpenEntry,
                   base::Unretained(this), key, entry);
    it->second.push_back(base::Bind(&RunOperationAndCallback,
                                    operation, callback));
    return net::ERR_IO_PENDING;
  }
  scoped_refptr<SimpleEntryImpl> simple_entry =
      CreateOrFindActiveEntry(entry_hash, key);
  CompletionCallback backend_callback =
      base::Bind(&SimpleBackendImpl::OnEntryOpenedFromKey,
                 AsWeakPtr(),
                 key,
                 entry,
                 simple_entry,
                 callback);
  return simple_entry->OpenEntry(entry, backend_callback);
}
| 392 | |
// Creates the entry for |key| (which must be non-empty). If the entry's
// hash has a doom in flight, the create is queued until the doom completes.
int SimpleBackendImpl::CreateEntry(const std::string& key,
                                   Entry** entry,
                                   const CompletionCallback& callback) {
  DCHECK_LT(0u, key.size());
  const uint64 entry_hash = simple_util::GetEntryHashKey(key);

  base::hash_map<uint64, std::vector<Closure> >::iterator it =
      entries_pending_doom_.find(entry_hash);
  if (it != entries_pending_doom_.end()) {
    Callback<int(const net::CompletionCallback&)> operation =
        base::Bind(&SimpleBackendImpl::CreateEntry,
                   base::Unretained(this), key, entry);
    it->second.push_back(base::Bind(&RunOperationAndCallback,
                                    operation, callback));
    return net::ERR_IO_PENDING;
  }
  scoped_refptr<SimpleEntryImpl> simple_entry =
      CreateOrFindActiveEntry(entry_hash, key);
  return simple_entry->CreateEntry(entry, callback);
}
| 413 | |
// Dooms the entry for |key|. If the entry's hash already has a doom in
// flight, this doom is queued behind it.
int SimpleBackendImpl::DoomEntry(const std::string& key,
                                 const net::CompletionCallback& callback) {
  const uint64 entry_hash = simple_util::GetEntryHashKey(key);

  base::hash_map<uint64, std::vector<Closure> >::iterator it =
      entries_pending_doom_.find(entry_hash);
  if (it != entries_pending_doom_.end()) {
    Callback<int(const net::CompletionCallback&)> operation =
        base::Bind(&SimpleBackendImpl::DoomEntry, base::Unretained(this), key);
    it->second.push_back(base::Bind(&RunOperationAndCallback,
                                    operation, callback));
    return net::ERR_IO_PENDING;
  }
  scoped_refptr<SimpleEntryImpl> simple_entry =
      CreateOrFindActiveEntry(entry_hash, key);
  return simple_entry->DoomEntry(callback);
}
| 431 | |
// Dooms every entry, implemented as a doom over the unrestricted time range.
int SimpleBackendImpl::DoomAllEntries(const CompletionCallback& callback) {
  return DoomEntriesBetween(Time(), Time(), callback);
}
| 435 | |
// Continuation of DoomEntriesBetween(): runs once the index is ready.
// Collects the hashes for the requested time range (semantics defined by
// SimpleIndex::GetEntriesBetween) and dooms them; index initialization
// failures are forwarded straight to |callback|.
void SimpleBackendImpl::IndexReadyForDoom(Time initial_time,
                                          Time end_time,
                                          const CompletionCallback& callback,
                                          int result) {
  if (result != net::OK) {
    callback.Run(result);
    return;
  }
  scoped_ptr<std::vector<uint64> > removed_key_hashes(
      index_->GetEntriesBetween(initial_time, end_time).release());
  DoomEntries(removed_key_hashes.get(), callback);
}
| 448 | |
// Dooms entries in the given time range. The entry set comes from the
// index, so the work is deferred until the index has finished loading.
int SimpleBackendImpl::DoomEntriesBetween(
    const Time initial_time,
    const Time end_time,
    const CompletionCallback& callback) {
  return index_->ExecuteWhenReady(
      base::Bind(&SimpleBackendImpl::IndexReadyForDoom, AsWeakPtr(),
                 initial_time, end_time, callback));
}
| 457 | |
// Dooms entries from |initial_time| onward (a default-constructed end time
// leaves the range open-ended).
int SimpleBackendImpl::DoomEntriesSince(
    const Time initial_time,
    const CompletionCallback& callback) {
  return DoomEntriesBetween(initial_time, Time(), callback);
}
| 463 | |
// Iterates the backend's entries by snapshotting the index's hashes and
// opening each corresponding entry in turn. Hashes whose entries vanish
// between snapshot and open are skipped; iteration ends (net::ERR_FAILED)
// when the snapshot is exhausted or the backend is destroyed.
class SimpleBackendImpl::SimpleIterator final : public Iterator {
 public:
  explicit SimpleIterator(base::WeakPtr<SimpleBackendImpl> backend)
      : backend_(backend),
        weak_factory_(this) {
  }

  // From Backend::Iterator:
  int OpenNextEntry(Entry** next_entry,
                    const CompletionCallback& callback) override {
    CompletionCallback open_next_entry_impl =
        base::Bind(&SimpleIterator::OpenNextEntryImpl,
                   weak_factory_.GetWeakPtr(), next_entry, callback);
    // Enumeration cannot start until the index has loaded.
    return backend_->index_->ExecuteWhenReady(open_next_entry_impl);
  }

  void OpenNextEntryImpl(Entry** next_entry,
                         const CompletionCallback& callback,
                         int index_initialization_error_code) {
    if (!backend_) {
      // The backend was destroyed while waiting for the index.
      callback.Run(net::ERR_FAILED);
      return;
    }
    if (index_initialization_error_code != net::OK) {
      callback.Run(index_initialization_error_code);
      return;
    }
    // Take the hash snapshot lazily, on the first call.
    if (!hashes_to_enumerate_)
      hashes_to_enumerate_ = backend_->index()->GetAllHashes().Pass();

    while (!hashes_to_enumerate_->empty()) {
      uint64 entry_hash = hashes_to_enumerate_->back();
      hashes_to_enumerate_->pop_back();
      // Skip hashes the index no longer knows about.
      if (backend_->index()->Has(entry_hash)) {
        *next_entry = NULL;
        CompletionCallback continue_iteration = base::Bind(
            &SimpleIterator::CheckIterationReturnValue,
            weak_factory_.GetWeakPtr(),
            next_entry,
            callback);
        int error_code_open = backend_->OpenEntryFromHash(entry_hash,
                                                          next_entry,
                                                          continue_iteration);
        if (error_code_open == net::ERR_IO_PENDING)
          return;
        if (error_code_open != net::ERR_FAILED) {
          callback.Run(error_code_open);
          return;
        }
        // ERR_FAILED: the entry disappeared; try the next hash.
      }
    }
    // Snapshot exhausted: signal end of iteration.
    callback.Run(net::ERR_FAILED);
  }

  // Resumes iteration when an asynchronous open failed (the entry went
  // away); any other result is passed through to |callback| unchanged.
  void CheckIterationReturnValue(Entry** entry,
                                 const CompletionCallback& callback,
                                 int error_code) {
    if (error_code == net::ERR_FAILED) {
      OpenNextEntry(entry, callback);
      return;
    }
    callback.Run(error_code);
  }

 private:
  base::WeakPtr<SimpleBackendImpl> backend_;
  // Snapshot of index hashes still to visit; NULL until first use.
  scoped_ptr<std::vector<uint64> > hashes_to_enumerate_;
  base::WeakPtrFactory<SimpleIterator> weak_factory_;
};
| 533 | |
// Creates a new iterator holding a weak reference to this backend.
scoped_ptr<Backend::Iterator> SimpleBackendImpl::CreateIterator() {
  return scoped_ptr<Iterator>(new SimpleIterator(AsWeakPtr()));
}
| 537 | |
| 538 void SimpleBackendImpl::GetStats( | |
| 539 std::vector<std::pair<std::string, std::string> >* stats) { | |
| 540 std::pair<std::string, std::string> item; | |
| 541 item.first = "Cache type"; | |
| 542 item.second = "Simple Cache"; | |
| 543 stats->push_back(item); | |
| 544 } | |
| 545 | |
// Marks the entry for |key| as used in the index, if it exists there.
void SimpleBackendImpl::OnExternalCacheHit(const std::string& key) {
  index_->UseIfExists(simple_util::GetEntryHashKey(key));
}
| 549 | |
// Reply half of Init(): applies the on-disk scan results to the index
// (only on success) and reports the outcome to the Init() caller.
void SimpleBackendImpl::InitializeIndex(const CompletionCallback& callback,
                                        const DiskStatResult& result) {
  if (result.net_error == net::OK) {
    index_->SetMaxSize(result.max_size);
    index_->Initialize(result.cache_dir_mtime);
  }
  callback.Run(result.net_error);
}
| 558 | |
// Runs on the cache thread (bound without a receiver in Init()). Verifies
// or creates the on-disk cache structure and determines the maximum cache
// size: |suggested_max_size| if non-zero, otherwise a heuristic based on
// free disk space.
SimpleBackendImpl::DiskStatResult SimpleBackendImpl::InitCacheStructureOnDisk(
    const base::FilePath& path,
    uint64 suggested_max_size) {
  DiskStatResult result;
  result.max_size = suggested_max_size;
  result.net_error = net::OK;
  if (!FileStructureConsistent(path)) {
    LOG(ERROR) << "Simple Cache Backend: wrong file structure on disk: "
               << path.LossyDisplayName();
    result.net_error = net::ERR_FAILED;
  } else {
    // Record the directory mtime; it is passed to SimpleIndex::Initialize().
    bool mtime_result =
        disk_cache::simple_util::GetMTime(path, &result.cache_dir_mtime);
    DCHECK(mtime_result);
    if (!result.max_size) {
      int64 available = base::SysInfo::AmountOfFreeDiskSpace(path);
      result.max_size = disk_cache::PreferredCacheSize(available);
    }
    DCHECK(result.max_size);
  }
  return result;
}
| 581 | |
// Returns the active entry for |entry_hash|, creating and registering a new
// SimpleEntryImpl if none exists. On a hash collision with a differently
// keyed active entry, the existing entry is doomed and the lookup retried.
scoped_refptr<SimpleEntryImpl> SimpleBackendImpl::CreateOrFindActiveEntry(
    const uint64 entry_hash,
    const std::string& key) {
  DCHECK_EQ(entry_hash, simple_util::GetEntryHashKey(key));
  std::pair<EntryMap::iterator, bool> insert_result =
      active_entries_.insert(EntryMap::value_type(entry_hash, NULL));
  EntryMap::iterator& it = insert_result.first;
  const bool did_insert = insert_result.second;
  if (did_insert) {
    SimpleEntryImpl* entry = it->second =
        new SimpleEntryImpl(cache_type_, path_, entry_hash,
                            entry_operations_mode_, this, net_log_);
    entry->SetKey(key);
    // The proxy removes the entry from |active_entries_| on destruction.
    entry->SetActiveEntryProxy(ActiveEntryProxy::Create(entry_hash, this));
  }
  DCHECK(it->second);
  // It's possible, but unlikely, that we have an entry hash collision with a
  // currently active entry.
  if (key != it->second->key()) {
    it->second->Doom();
    // Doom() must have dropped the colliding entry from the map; retry.
    DCHECK_EQ(0U, active_entries_.count(entry_hash));
    return CreateOrFindActiveEntry(entry_hash, key);
  }
  return make_scoped_refptr(it->second);
}
| 607 | |
// Opens an entry known only by its hash (used during iteration). If the
// entry is active its key is available, so this defers to the key-based
// OpenEntry(); otherwise a fresh SimpleEntryImpl is opened and registered
// as active in OnEntryOpenedFromHash().
int SimpleBackendImpl::OpenEntryFromHash(uint64 entry_hash,
                                         Entry** entry,
                                         const CompletionCallback& callback) {
  // If this hash has a doom in flight, queue the open behind it.
  base::hash_map<uint64, std::vector<Closure> >::iterator it =
      entries_pending_doom_.find(entry_hash);
  if (it != entries_pending_doom_.end()) {
    Callback<int(const net::CompletionCallback&)> operation =
        base::Bind(&SimpleBackendImpl::OpenEntryFromHash,
                   base::Unretained(this), entry_hash, entry);
    it->second.push_back(base::Bind(&RunOperationAndCallback,
                                    operation, callback));
    return net::ERR_IO_PENDING;
  }

  EntryMap::iterator has_active = active_entries_.find(entry_hash);
  if (has_active != active_entries_.end()) {
    return OpenEntry(has_active->second->key(), entry, callback);
  }

  scoped_refptr<SimpleEntryImpl> simple_entry = new SimpleEntryImpl(
      cache_type_, path_, entry_hash, entry_operations_mode_, this, net_log_);
  CompletionCallback backend_callback =
      base::Bind(&SimpleBackendImpl::OnEntryOpenedFromHash,
                 AsWeakPtr(), entry_hash, entry, simple_entry, callback);
  return simple_entry->OpenEntry(entry, backend_callback);
}
| 634 | |
| 635 int SimpleBackendImpl::DoomEntryFromHash(uint64 entry_hash, | |
| 636 const CompletionCallback& callback) { | |
| 637 Entry** entry = new Entry*(); | |
| 638 scoped_ptr<Entry*> scoped_entry(entry); | |
| 639 | |
| 640 base::hash_map<uint64, std::vector<Closure> >::iterator pending_it = | |
| 641 entries_pending_doom_.find(entry_hash); | |
| 642 if (pending_it != entries_pending_doom_.end()) { | |
| 643 Callback<int(const net::CompletionCallback&)> operation = | |
| 644 base::Bind(&SimpleBackendImpl::DoomEntryFromHash, | |
| 645 base::Unretained(this), entry_hash); | |
| 646 pending_it->second.push_back(base::Bind(&RunOperationAndCallback, | |
| 647 operation, callback)); | |
| 648 return net::ERR_IO_PENDING; | |
| 649 } | |
| 650 | |
| 651 EntryMap::iterator active_it = active_entries_.find(entry_hash); | |
| 652 if (active_it != active_entries_.end()) | |
| 653 return active_it->second->DoomEntry(callback); | |
| 654 | |
| 655 // There's no pending dooms, nor any open entry. We can make a trivial | |
| 656 // call to DoomEntries() to delete this entry. | |
| 657 std::vector<uint64> entry_hash_vector; | |
| 658 entry_hash_vector.push_back(entry_hash); | |
| 659 DoomEntries(&entry_hash_vector, callback); | |
| 660 return net::ERR_IO_PENDING; | |
| 661 } | |
| 662 | |
// Completion of an open-by-hash. On success, registers |simple_entry| as
// the active entry for |hash| — unless another entry became active in the
// meantime, in which case |simple_entry| is closed and the active one is
// opened and returned instead.
void SimpleBackendImpl::OnEntryOpenedFromHash(
    uint64 hash,
    Entry** entry,
    const scoped_refptr<SimpleEntryImpl>& simple_entry,
    const CompletionCallback& callback,
    int error_code) {
  if (error_code != net::OK) {
    callback.Run(error_code);
    return;
  }
  DCHECK(*entry);
  std::pair<EntryMap::iterator, bool> insert_result =
      active_entries_.insert(EntryMap::value_type(hash, simple_entry.get()));
  EntryMap::iterator& it = insert_result.first;
  const bool did_insert = insert_result.second;
  if (did_insert) {
    // There was no active entry corresponding to this hash. We've already put
    // the entry opened from hash in the |active_entries_|. We now provide the
    // proxy object to the entry.
    it->second->SetActiveEntryProxy(ActiveEntryProxy::Create(hash, this));
    callback.Run(net::OK);
  } else {
    // The entry was made active while we waiting for the open from hash to
    // finish. The entry created from hash needs to be closed, and the one
    // in |active_entries_| can be returned to the caller.
    simple_entry->Close();
    it->second->OpenEntry(entry, callback);
  }
}
| 692 | |
// Completion of a key-based open. Verifies that the opened entry's key
// matches the requested one (guarding against a hash collision with an
// entry on disk); on mismatch the entry is doomed and closed and the open
// fails with net::ERR_FAILED.
void SimpleBackendImpl::OnEntryOpenedFromKey(
    const std::string key,
    Entry** entry,
    const scoped_refptr<SimpleEntryImpl>& simple_entry,
    const CompletionCallback& callback,
    int error_code) {
  int final_code = error_code;
  if (final_code == net::OK) {
    bool key_matches = key.compare(simple_entry->key()) == 0;
    if (!key_matches) {
      // TODO(clamy): Add a unit test to check this code path.
      DLOG(WARNING) << "Key mismatch on open.";
      simple_entry->Doom();
      simple_entry->Close();
      final_code = net::ERR_FAILED;
    } else {
      DCHECK_EQ(simple_entry->entry_hash(), simple_util::GetEntryHashKey(key));
    }
    SIMPLE_CACHE_UMA(BOOLEAN, "KeyMatchedOnOpen", cache_type_, key_matches);
  }
  callback.Run(final_code);
}
| 715 | |
| 716 void SimpleBackendImpl::DoomEntriesComplete( | |
| 717 scoped_ptr<std::vector<uint64> > entry_hashes, | |
| 718 const net::CompletionCallback& callback, | |
| 719 int result) { | |
| 720 std::for_each( | |
| 721 entry_hashes->begin(), entry_hashes->end(), | |
| 722 std::bind1st(std::mem_fun(&SimpleBackendImpl::OnDoomComplete), | |
| 723 this)); | |
| 724 callback.Run(result); | |
| 725 } | |
| 726 | |
// Drains all pending tasks from the shared worker pool so tests can
// synchronize with outstanding disk operations. No-op if the pool was
// never created.
void SimpleBackendImpl::FlushWorkerPoolForTesting() {
  if (g_sequenced_worker_pool)
    g_sequenced_worker_pool->FlushForTesting();
}
| 731 | |
| 732 } // namespace disk_cache | |
| OLD | NEW |