Chromium Code Reviews| Index: net/disk_cache/simple/simple_backend_impl.cc |
| diff --git a/net/disk_cache/simple/simple_backend_impl.cc b/net/disk_cache/simple/simple_backend_impl.cc |
| index 4aed36c78fc6c7db72720e3dcd84a380e72dca77..5a3d3efff89867b585f924b4dd79a755595ae58d 100644 |
| --- a/net/disk_cache/simple/simple_backend_impl.cc |
| +++ b/net/disk_cache/simple/simple_backend_impl.cc |
| @@ -59,6 +59,13 @@ const int kMaxFileRatio = 8; |
| // A global sequenced worker pool to use for launching all tasks. |
| SequencedWorkerPool* g_sequenced_worker_pool = NULL; |
| +struct DoomEntrySetContext : public base::RefCounted<DoomEntrySetContext> { |
|
gavinp
2013/08/26 20:41:37
This probably should be a class, since it inherits from a base class.
Philippe
2013/08/27 11:33:30
Yes. In general I use structs when I only use public members.
|
| + DoomEntrySetContext() : entries_left(0), error_happened(false) {} |
| + |
| + int entries_left; // Number of entries that remain to be doomed. |
| + bool error_happened; |
| +}; |
| + |
| void MaybeCreateSequencedWorkerPool() { |
| if (!g_sequenced_worker_pool) { |
| int max_worker_threads = kDefaultMaxWorkerThreads; |
| @@ -205,6 +212,22 @@ void RecordIndexLoad(base::TimeTicks constructed_since, int result) { |
| UMA_HISTOGRAM_TIMES("SimpleCache.CreationToIndexFail", creation_to_index); |
| } |
| +// Used only by mass dooming to execute the client-provided callback only once |
|
Deprecated (see juliatuttle)
2013/08/26 21:34:18
It is clever that you reuse it as the "single entry doomed" callback as well. [comment truncated in original]
Philippe
2013/08/27 11:33:30
Yes, good point. Thanks.
|
| +// all the entries are doomed. |
| +void OnDoomEntriesCompleted( |
| + DoomEntrySetContext* context, |
| + const net::CompletionCallback& all_entries_doomed_callback, |
| + int doomed_entries_count, |
| + int net_error) { |
| + context->entries_left -= doomed_entries_count; |
| + if (net_error == net::ERR_FAILED) |
| + context->error_happened = true; |
| + if (context->entries_left == 0 && !all_entries_doomed_callback.is_null()) { |
| + all_entries_doomed_callback.Run( |
| + context->error_happened ? net::ERR_FAILED : net::OK); |
| + } |
| +} |
| + |
| } // namespace |
| namespace disk_cache { |
| @@ -317,7 +340,10 @@ void SimpleBackendImpl::IndexReadyForDoom(Time initial_time, |
| } |
| scoped_ptr<std::vector<uint64> > removed_key_hashes( |
| index_->RemoveEntriesBetween(initial_time, end_time).release()); |
| - |
| + const scoped_refptr<DoomEntrySetContext> context(new DoomEntrySetContext()); |
| + const CompletionCallback on_entry_doomed_callback = base::Bind( |
| + &OnDoomEntriesCompleted, context, callback, 1 /* entry count */); |
| + int doomed_active_entries_count = 0; |
| // If any of the entries we are dooming are currently open, we need to remove |
| // them from |active_entries_|, so that attempts to create new entries will |
| // succeed and attempts to open them will fail. |
| @@ -326,18 +352,22 @@ void SimpleBackendImpl::IndexReadyForDoom(Time initial_time, |
| EntryMap::iterator it = active_entries_.find(entry_hash); |
| if (it == active_entries_.end()) |
| continue; |
| + ++doomed_active_entries_count; |
| SimpleEntryImpl* entry = it->second.get(); |
| - entry->Doom(); |
| + entry->DoomEntry(on_entry_doomed_callback); |
| (*removed_key_hashes)[i] = removed_key_hashes->back(); |
| removed_key_hashes->resize(removed_key_hashes->size() - 1); |
| } |
| + context->entries_left += doomed_active_entries_count; |
| + context->entries_left += removed_key_hashes->size(); |
| PostTaskAndReplyWithResult( |
| worker_pool_, FROM_HERE, |
| base::Bind(&SimpleSynchronousEntry::DoomEntrySet, |
| base::Passed(&removed_key_hashes), path_), |
| - base::Bind(&CallCompletionCallback, callback)); |
| + base::Bind(&OnDoomEntriesCompleted, context, callback, |
| + removed_key_hashes->size())); |
| } |
| int SimpleBackendImpl::DoomEntriesBetween( |