Index: net/disk_cache/simple/simple_backend_impl.cc
diff --git a/net/disk_cache/simple/simple_backend_impl.cc b/net/disk_cache/simple/simple_backend_impl.cc
index 009076fd822193f50020986b8a75cee4e7abafbc..32580575a8081d38ab239976aa76e47adaa1b72a 100644
--- a/net/disk_cache/simple/simple_backend_impl.cc
+++ b/net/disk_cache/simple/simple_backend_impl.cc
@@ -24,7 +24,8 @@
 using base::Closure;
 using base::FilePath;
 using base::MessageLoopProxy;
-using base::SingleThreadTaskRunner;
+using base::SequencedWorkerPool;
+using base::TaskRunner;
 using base::Time;
 using base::WorkerPool;
 using file_util::DirectoryExists;
@@ -32,6 +33,12 @@ using file_util::CreateDirectory;
 namespace {
+// Max thread pool size.
+const int kMaxThreads = 50;

Randy Smith (Not in Mondays), 2013/05/21 21:15:30:
Yikes. There's a real, and reasonably high memory

+
+// Prefix given to all threads to aid in debugging.
+const char kThreadNamePrefix[] = "SimpleCache";
+
 // Cache size when all other size heuristics failed.
 const uint64 kDefaultCacheSize = 80 * 1024 * 1024;
@@ -119,6 +126,17 @@ void CallCompletionCallback(const net::CompletionCallback& callback,
   callback.Run(*result);
 }
+// Creates a task runner which will serialize operations on an ActiveEntry.
+// Must be called on the |inactive_entry_task_runner_| to correctly serialize
+// with pending mass doom operations.
+void CreateTaskRunnerForEntry(SequencedWorkerPool* sequenced_worker_pool,
+                              scoped_refptr<TaskRunner>* out_task_runner) {
+  *out_task_runner =
+      sequenced_worker_pool->GetSequencedTaskRunnerWithShutdownBehavior(
+          sequenced_worker_pool->GetSequenceToken(),
+          SequencedWorkerPool::CONTINUE_ON_SHUTDOWN);
+}
+
 void RecordIndexLoad(base::TimeTicks constructed_since, int result) {
   const base::TimeDelta creation_to_index = base::TimeTicks::Now() -
       constructed_since;
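
The helper above leans on SequencedWorkerPool sequence tokens for ordering: every task posted through a runner built from one token runs serially, in posting order, without pinning the entry to a dedicated thread. A minimal sketch of that guarantee, assuming the 2013-era base::SequencedWorkerPool API and two hypothetical task functions (WriteEntryHeader, WriteEntryBody); illustrative only, not part of this CL:

#include "base/bind.h"
#include "base/location.h"
#include "base/threading/sequenced_worker_pool.h"

void WriteEntryHeader();  // hypothetical task
void WriteEntryBody();    // hypothetical task

void SequenceTokenSketch() {
  scoped_refptr<base::SequencedWorkerPool> pool(
      new base::SequencedWorkerPool(2, "Example"));
  scoped_refptr<base::SequencedTaskRunner> runner =
      pool->GetSequencedTaskRunnerWithShutdownBehavior(
          pool->GetSequenceToken(),
          base::SequencedWorkerPool::CONTINUE_ON_SHUTDOWN);
  // The two tasks may land on either pool thread, but they never run
  // concurrently and never out of order, because they share a sequence token.
  runner->PostTask(FROM_HERE, base::Bind(&WriteEntryHeader));
  runner->PostTask(FROM_HERE, base::Bind(&WriteEntryBody));
}
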
@@ -143,13 +161,20 @@ SimpleBackendImpl::SimpleBackendImpl(
                              MessageLoopProxy::current(),  // io_thread
                              path)),
       cache_thread_(cache_thread),
-      orig_max_size_(max_bytes) {
+      orig_max_size_(max_bytes),
+      worker_pool_(new base::SequencedWorkerPool(kMaxThreads,
+                                                 kThreadNamePrefix)),
+      inactive_entry_task_runner_(
+          worker_pool_->GetSequencedTaskRunnerWithShutdownBehavior(
+              worker_pool_->GetSequenceToken(),
+              SequencedWorkerPool::SKIP_ON_SHUTDOWN)) {
   index_->ExecuteWhenReady(base::Bind(&RecordIndexLoad,
                                       base::TimeTicks::Now()));
 }
 SimpleBackendImpl::~SimpleBackendImpl() {
   index_->WriteToDisk();
+  worker_pool_->Shutdown();
 }
 int SimpleBackendImpl::Init(const CompletionCallback& completion_callback) {
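
The constructor above picks two different shutdown behaviors: SKIP_ON_SHUTDOWN for |inactive_entry_task_runner_| and CONTINUE_ON_SHUTDOWN for the per-entry runners made by CreateTaskRunnerForEntry(). A sketch of how those choices interact with the worker_pool_->Shutdown() call in the destructor, based on my reading of the SequencedWorkerPool contract of that era; illustrative only, not part of this CL:

#include "base/bind.h"
#include "base/location.h"
#include "base/threading/sequenced_worker_pool.h"

void DoomTask();        // hypothetical stand-in for a doom/index task
void EntryWriteTask();  // hypothetical stand-in for per-entry I/O

void ShutdownBehaviorSketch() {
  scoped_refptr<base::SequencedWorkerPool> pool(
      new base::SequencedWorkerPool(4, "Sketch"));
  // SKIP_ON_SHUTDOWN: a task already running when Shutdown() is called may
  // finish; queued tasks that have not started yet are dropped.
  scoped_refptr<base::SequencedTaskRunner> doom_runner =
      pool->GetSequencedTaskRunnerWithShutdownBehavior(
          pool->GetSequenceToken(),
          base::SequencedWorkerPool::SKIP_ON_SHUTDOWN);
  // CONTINUE_ON_SHUTDOWN: tasks never block Shutdown() and are not guaranteed
  // to run at all once shutdown has started.
  scoped_refptr<base::SequencedTaskRunner> entry_runner =
      pool->GetSequencedTaskRunnerWithShutdownBehavior(
          pool->GetSequenceToken(),
          base::SequencedWorkerPool::CONTINUE_ON_SHUTDOWN);
  doom_runner->PostTask(FROM_HERE, base::Bind(&DoomTask));
  entry_runner->PostTask(FROM_HERE, base::Bind(&EntryWriteTask));
  // Returns without waiting for CONTINUE_ON_SHUTDOWN work; a SKIP_ON_SHUTDOWN
  // task that is mid-run is allowed to complete first.
  pool->Shutdown();
}
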
@@ -176,9 +201,17 @@ int SimpleBackendImpl::GetMaxFileSize() const {
   return index_->max_size() / kMaxFileRatio;
 }
-void SimpleBackendImpl::OnDeactivated(const SimpleEntryImpl* entry) {
-  DCHECK_LT(0U, active_entries_.count(entry->entry_hash()));
-  active_entries_.erase(entry->entry_hash());
+void SimpleBackendImpl::DeactivateEntry(uint64 entry_hash) {
+  DCHECK_NE(0U, active_entries_.count(entry_hash));
+  active_entries_[entry_hash].entry = NULL;
+}
+
+void SimpleBackendImpl::ClosedEntry(uint64 entry_hash) {
+  DCHECK_NE(0U, active_entries_.count(entry_hash));
+  EntryMap::iterator it = active_entries_.find(entry_hash);
+  ActiveEntry& active_entry = it->second;
+  if (!active_entry.entry)
+    active_entries_.erase(it);
 }
 net::CacheType SimpleBackendImpl::GetCacheType() const {
@@ -233,8 +266,8 @@ void SimpleBackendImpl::IndexReadyForDoom(Time initial_time,
     EntryMap::iterator it = active_entries_.find(entry_hash);
     if (it == active_entries_.end())
       continue;
-    SimpleEntryImpl* entry = it->second;
-    entry->Doom();
+    if (SimpleEntryImpl* entry = it->second.entry)
+      entry->Doom();
     (*removed_key_hashes)[i] = removed_key_hashes->back();
     removed_key_hashes->resize(removed_key_hashes->size() - 1);
@@ -284,6 +317,75 @@ void SimpleBackendImpl::OnExternalCacheHit(const std::string& key) {
   index_->UseIfExists(key);
 }
+// private:
+
+// A simple task runner which either calls through to an underlying TaskRunner,
+// or if none is provided, enqueues tasks to run when a TaskRunner is finally
+// given.
+class SimpleBackendImpl::ProxyTaskRunner : public TaskRunner {

pasko, 2013/05/21 16:00:01:
A more self-descriptive name would be:
BarrierTask

Randy Smith (Not in Mondays), 2013/05/21 21:15:30:
nit: I think helper classes are supposed to be at

+ public:
+  ProxyTaskRunner() {
+  }
+
+  void SetTaskRunner(
+      scoped_ptr<scoped_refptr<base::TaskRunner> > in_task_runner) {
+    DCHECK(!task_runner_);
+
+    task_runner_.swap(*in_task_runner);
+    while (!tasks_.empty()) {
+      task_runner_->PostTask(tasks_.front().first, tasks_.front().second);
+      tasks_.pop();
+    }
+  }
+
+  bool IsEmpty() const {
+    return !task_runner_ && tasks_.empty();
+  }
+
+  // From TaskRunner:
+  virtual bool PostDelayedTask(const tracked_objects::Location& from_here,
+                               const Closure& task,
+                               base::TimeDelta delay) OVERRIDE {
+    if (task_runner_)
+      return task_runner_->PostDelayedTask(from_here, task, delay);
+    tasks_.push(std::make_pair(from_here, task));
+    return true;
+  }
+
+  virtual bool RunsTasksOnCurrentThread() const OVERRIDE {
+    if (task_runner_)
+      return task_runner_->RunsTasksOnCurrentThread();
+    return false;
+  }
+
+ private:
+  virtual ~ProxyTaskRunner() {
+  }
+
+  std::queue<std::pair<tracked_objects::Location, Closure> > tasks_;
+  scoped_refptr<TaskRunner> task_runner_;
+};
+
+SimpleBackendImpl::ActiveEntry::ActiveEntry()
+    : task_runner(new SimpleBackendImpl::ProxyTaskRunner()),
+      entry(NULL) {
+}
+
+SimpleBackendImpl::ActiveEntry::ActiveEntry(const ActiveEntry& other)
+    : task_runner(other.task_runner),
+      entry(other.entry) {
+}
+
+SimpleBackendImpl::ActiveEntry::~ActiveEntry() {
+}
+
+SimpleBackendImpl::ActiveEntry& SimpleBackendImpl::ActiveEntry::operator=(
+    const ActiveEntry& other) {
+  task_runner = other.task_runner;
+  entry = other.entry;
+  return *this;
+}
+
 void SimpleBackendImpl::InitializeIndex(
     const CompletionCallback& callback, uint64 suggested_max_size, int result) {
   if (result == net::OK) {
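
ProxyTaskRunner above buffers tasks until the real sequenced runner arrives from the worker pool, then replays them in posting order and forwards everything afterwards. A minimal sketch of that queue-then-flush behavior, written as if it were a (hypothetical) SimpleBackendImpl member so the private nested class is visible, with a made-up LogSomething() task; illustrative only, not part of this CL:

void LogSomething();  // hypothetical task

void SimpleBackendImpl::ProxyRunnerSketch(
    const scoped_refptr<ProxyTaskRunner>& proxy,
    const scoped_refptr<base::TaskRunner>& real_runner) {
  // Before SetTaskRunner(): the task is queued inside the proxy, not run.
  proxy->PostTask(FROM_HERE, base::Bind(&LogSomething));
  // Handing over the real runner flushes the queue in FIFO order. The
  // scoped_ptr-of-scoped_refptr shape mirrors how CreateOrFindActiveEntry()
  // hands the runner across threads below.
  scoped_ptr<scoped_refptr<base::TaskRunner> > holder(
      new scoped_refptr<base::TaskRunner>(real_runner));
  proxy->SetTaskRunner(holder.Pass());
  // Later posts forward directly to |real_runner|.
  proxy->PostTask(FROM_HERE, base::Bind(&LogSomething));
}
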
@@ -295,7 +397,7 @@ void SimpleBackendImpl::InitializeIndex(
 // static
 void SimpleBackendImpl::ProvideDirectorySuggestBetterCacheSize(
-    SingleThreadTaskRunner* io_thread,
+    base::SingleThreadTaskRunner* io_thread,
     const base::FilePath& path,
     const InitializeIndexCallback& initialize_index_callback,
     uint64 suggested_max_size) {
@@ -325,25 +427,38 @@ scoped_refptr<SimpleEntryImpl> SimpleBackendImpl::CreateOrFindActiveEntry(
     const std::string& key) {
   const uint64 entry_hash = simple_util::GetEntryHashKey(key);
-  std::pair<EntryMap::iterator, bool> insert_result =
-      active_entries_.insert(std::make_pair(entry_hash,
-                                            base::WeakPtr<SimpleEntryImpl>()));
+  std::pair<EntryMap::iterator, bool> insert_result = active_entries_.insert(
+      std::make_pair(entry_hash, ActiveEntry()));
   EntryMap::iterator& it = insert_result.first;
-  if (insert_result.second)
-    DCHECK(!it->second);
-  if (!it->second) {
-    SimpleEntryImpl* entry = new SimpleEntryImpl(this, path_, key, entry_hash);
-    it->second = entry->AsWeakPtr();
+  ActiveEntry& active_entry = it->second;
+
+  if (insert_result.second) {
+    // If this is a new ActiveEntry, we have to asynchronously construct a new
+    // task_runner for it.
+    scoped_ptr<scoped_refptr<base::TaskRunner> >
+        out_task_runner(new scoped_refptr<base::TaskRunner>());
+    inactive_entry_task_runner_->PostTaskAndReply(

pasko, 2013/05/21 16:00:01:
Why do we have to serialize token creation via a w

gavinp, 2013/05/21 16:55:02:
What if a mass doom operation is running right now

pasko-google - do not use, 2013/05/21 17:25:47:
something around the lines of SimpleEntryImpl::Ope

Randy Smith (Not in Mondays), 2013/05/21 21:15:30:
What are the performance costs of doing a mass doo

+        FROM_HERE,
+        base::Bind(&CreateTaskRunnerForEntry, worker_pool_,
+                   out_task_runner.get()),
+        base::Bind(&ProxyTaskRunner::SetTaskRunner, active_entry.task_runner,
+                   base::Passed(&out_task_runner)));
   }
-  DCHECK(it->second);
+
+  if (!active_entry.entry) {
+    active_entry.entry = new SimpleEntryImpl(this, active_entry.task_runner,
+                                             path_, key, entry_hash);
+  }
+
   // It's possible, but unlikely, that we have an entry hash collision with a
   // currently active entry.
-  if (key != it->second->key()) {
-    it->second->Doom();
+  if (key != active_entry.entry->key()) {
+    active_entry.entry->Doom();
     DCHECK_EQ(0U, active_entries_.count(entry_hash));
     return CreateOrFindActiveEntry(key);
   }
-  return make_scoped_refptr(it->second.get());
+
+  return make_scoped_refptr(active_entry.entry);
 }
 }  // namespace disk_cache
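
The CreateOrFindActiveEntry() change uses the usual PostTaskAndReply out-parameter pattern: the worker-side task fills a heap-allocated slot, and the reply consumes that slot back on the posting thread. A stripped-down sketch of the same pattern with hypothetical ComputeValue()/UseValue() functions; illustrative only, not part of this CL:

#include "base/bind.h"
#include "base/location.h"
#include "base/memory/scoped_ptr.h"
#include "base/task_runner.h"

// Hypothetical work/reply functions used only for illustration.
void ComputeValue(int* out) { *out = 42; }                     // worker side
void UseValue(scoped_ptr<int> value) { /* posting thread */ }  // reply side

void PostWithReplySketch(base::TaskRunner* task_runner) {
  scoped_ptr<int> out(new int(0));
  // Grab the raw pointer before base::Passed() takes ownership, so the order
  // in which the two Bind() expressions are evaluated cannot matter.
  int* out_raw = out.get();
  task_runner->PostTaskAndReply(
      FROM_HERE,
      base::Bind(&ComputeValue, out_raw),
      base::Bind(&UseValue, base::Passed(&out)));
}
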