Chromium Code Reviews
| Index: net/disk_cache/memory/mem_entry_impl.cc |
| diff --git a/net/disk_cache/memory/mem_entry_impl.cc b/net/disk_cache/memory/mem_entry_impl.cc |
| index 692551866a6b71ef1cc2b155fcbc769df9111fc3..c28a7fab748d565877004f12331eb6db71efbfb4 100644 |
| --- a/net/disk_cache/memory/mem_entry_impl.cc |
| +++ b/net/disk_cache/memory/mem_entry_impl.cc |
| @@ -4,6 +4,7 @@ |
| #include "net/disk_cache/memory/mem_entry_impl.h" |
| +#include <algorithm> |
| #include <utility> |
| #include "base/bind.h" |
| @@ -17,6 +18,8 @@ |
| using base::Time; |
| +namespace disk_cache { |
| + |
| namespace { |
| const int kSparseData = 1; |
| @@ -28,12 +31,12 @@ const int kMaxSparseEntryBits = 12; |
| const int kMaxSparseEntrySize = 1 << kMaxSparseEntryBits; |
| // Convert global offset to child index. |
| -inline int ToChildIndex(int64_t offset) { |
| +int ToChildIndex(int64_t offset) { |
| return static_cast<int>(offset >> kMaxSparseEntryBits); |
| } |
| // Convert global offset to offset in child entry. |
| -inline int ToChildOffset(int64_t offset) { |
| +int ToChildOffset(int64_t offset) { |
| return static_cast<int>(offset & (kMaxSparseEntrySize - 1)); |
| } |
| @@ -45,129 +48,110 @@ std::string GenerateChildName(const std::string& base_name, int child_id) { |
| return base::StringPrintf("Range_%s:%i", base_name.c_str(), child_id); |
| } |
| -// Returns NetLog parameters for the creation of a child MemEntryImpl. Separate |
| -// function needed because child entries don't suppport GetKey(). |
| -scoped_ptr<base::Value> NetLogChildEntryCreationCallback( |
| - const disk_cache::MemEntryImpl* parent, |
| - int child_id, |
| +// Returns NetLog parameters for the creation of a MemEntryImpl. A separate |
| +// function is needed because child entries don't store their key(). |
| +scoped_ptr<base::Value> NetLogEntryCreationCallback( |
| + const MemEntryImpl* entry, |
| net::NetLogCaptureMode /* capture_mode */) { |
| scoped_ptr<base::DictionaryValue> dict(new base::DictionaryValue()); |
| - dict->SetString("key", GenerateChildName(parent->GetKey(), child_id)); |
| + std::string key; |
| + switch (entry->type()) { |
| + case MemEntryImpl::PARENT_ENTRY: |
| + key = entry->key(); |
| + break; |
| + case MemEntryImpl::CHILD_ENTRY: |
| + key = GenerateChildName(entry->parent()->key(), entry->child_id()); |
| + break; |
| + } |
| + dict->SetString("key", key); |
| dict->SetBoolean("created", true); |
| return std::move(dict); |
| } |
| } // namespace |
| -namespace disk_cache { |
| - |
| -MemEntryImpl::MemEntryImpl(MemBackendImpl* backend) { |
| - doomed_ = false; |
| - backend_ = backend; |
| - ref_count_ = 0; |
| - parent_ = NULL; |
| - child_id_ = 0; |
| - child_first_pos_ = 0; |
| - next_ = NULL; |
| - prev_ = NULL; |
| - for (int i = 0; i < NUM_STREAMS; i++) |
| - data_size_[i] = 0; |
| -} |
| - |
| -// ------------------------------------------------------------------------ |
| - |
| -bool MemEntryImpl::CreateEntry(const std::string& key, net::NetLog* net_log) { |
| - key_ = key; |
| - Time current = Time::Now(); |
| - last_modified_ = current; |
| - last_used_ = current; |
| - |
| - net_log_ = net::BoundNetLog::Make(net_log, |
| - net::NetLog::SOURCE_MEMORY_CACHE_ENTRY); |
| - // Must be called after |key_| is set, so GetKey() works. |
| - net_log_.BeginEvent( |
| - net::NetLog::TYPE_DISK_CACHE_MEM_ENTRY_IMPL, |
| - CreateNetLogEntryCreationCallback(this, true)); |
| - |
| +MemEntryImpl::MemEntryImpl(MemBackendImpl* backend, |
| + const std::string& key, |
| + net::NetLog* net_log) |
| + : MemEntryImpl(backend, |
| + key, |
| + 0, // child_id |
| + nullptr, // parent |
| + net_log) { |
| Open(); |
| - backend_->ModifyStorageSize(0, static_cast<int32_t>(key.size())); |
| - return true; |
| + backend_->ModifyStorageSize(GetStorageSize()); |
| } |
| -void MemEntryImpl::InternalDoom() { |
| - net_log_.AddEvent(net::NetLog::TYPE_ENTRY_DOOM); |
| - doomed_ = true; |
| - if (!ref_count_) { |
| - if (type() == kParentEntry) { |
| - // If this is a parent entry, we need to doom all the child entries. |
| - if (children_.get()) { |
| - EntryMap children; |
| - children.swap(*children_); |
| - for (EntryMap::iterator i = children.begin(); |
| - i != children.end(); ++i) { |
| - // Since a pointer to this object is also saved in the map, avoid |
| - // dooming it. |
| - if (i->second != this) |
| - i->second->Doom(); |
| - } |
| - DCHECK(children_->empty()); |
| - } |
| - } else { |
| - // If this is a child entry, detach it from the parent. |
| - parent_->DetachChild(child_id_); |
| - } |
| - delete this; |
| - } |
| +MemEntryImpl::MemEntryImpl(MemBackendImpl* backend, |
| + int child_id, |
| + MemEntryImpl* parent, |
| + net::NetLog* net_log) |
| + : MemEntryImpl(backend, |
| + std::string(), // key |
| + child_id, |
| + parent, |
| + net_log) { |
| + (*parent_->children_)[child_id] = this; |
| } |
| void MemEntryImpl::Open() { |
| // Only a parent entry can be opened. |
| - // TODO(hclam): make sure it's correct to not apply the concept of ref |
| - // counting to child entry. |
| - DCHECK(type() == kParentEntry); |
| - ref_count_++; |
| - DCHECK_GE(ref_count_, 0); |
| + DCHECK_EQ(PARENT_ENTRY, type()); |
| + ++ref_count_; |
| + DCHECK_GE(ref_count_, 1); |
| DCHECK(!doomed_); |
| } |
| -bool MemEntryImpl::InUse() { |
| - if (type() == kParentEntry) { |
| +bool MemEntryImpl::InUse() const { |
| + if (type() == PARENT_ENTRY) { |
| return ref_count_ > 0; |
| } else { |
| - // A child entry is always not in use. The consequence is that a child entry |
| - // can always be evicted while the associated parent entry is currently in |
| - // used (i.e. opened). |
| + // TODO(gavinp): Can't this just be a DCHECK? How would ref_count_ not be |
| + // zero? |
| + |
| + // A child entry is never in use. Thus one can always be evicted, even while |
| + // its parent entry is open and in use. |
| return false; |
| } |
| } |
| -// ------------------------------------------------------------------------ |
| +int MemEntryImpl::GetStorageSize() const { |
| + int storage_size = static_cast<int32_t>(key_.size()); |
| + for (const auto& i : data_) |
| + storage_size += i.size(); |
| + return storage_size; |
| +} |
| + |
| +void MemEntryImpl::UpdateStateOnUse(EntryModified modified_enum) { |
| + if (!doomed_) |
| + backend_->OnEntryUpdated(this); |
| + |
| + last_used_ = Time::Now(); |
| + if (modified_enum == ENTRY_WAS_MODIFIED) |
| + last_modified_ = last_used_; |
| +} |
| void MemEntryImpl::Doom() { |
| - if (doomed_) |
| - return; |
| - if (type() == kParentEntry) { |
| - // Perform internal doom from the backend if this is a parent entry. |
| - backend_->InternalDoomEntry(this); |
| - } else { |
| - // Manually detach from the backend and perform internal doom. |
| - backend_->RemoveFromRankingList(this); |
| - InternalDoom(); |
| + if (!doomed_) { |
| + doomed_ = true; |
| + backend_->OnEntryDoomed(this); |
| + net_log_.AddEvent(net::NetLog::TYPE_ENTRY_DOOM); |
| } |
| + if (!ref_count_) |
| + delete this; |
| } |
| void MemEntryImpl::Close() { |
| - // Only a parent entry can be closed. |
| - DCHECK(type() == kParentEntry); |
| - ref_count_--; |
| + DCHECK_EQ(PARENT_ENTRY, type()); |
| + --ref_count_; |
| DCHECK_GE(ref_count_, 0); |
| if (!ref_count_ && doomed_) |
| - InternalDoom(); |
| + delete this; |
| } |
| std::string MemEntryImpl::GetKey() const { |
| // A child entry doesn't have key so this method should not be called. |
| - DCHECK(type() == kParentEntry); |
| + DCHECK_EQ(PARENT_ENTRY, type()); |
| return key_; |
| } |
| @@ -180,9 +164,9 @@ Time MemEntryImpl::GetLastModified() const { |
| } |
| int32_t MemEntryImpl::GetDataSize(int index) const { |
| - if (index < 0 || index >= NUM_STREAMS) |
| + if (index < 0 || index >= kNumStreams) |
| return 0; |
| - return data_size_[index]; |
| + return data_[index].size(); |
| } |
| int MemEntryImpl::ReadData(int index, int offset, IOBuffer* buf, int buf_len, |
| @@ -260,7 +244,7 @@ int MemEntryImpl::GetAvailableRange(int64_t offset, |
| net::NetLog::TYPE_SPARSE_GET_RANGE, |
| CreateNetLogSparseOperationCallback(offset, len)); |
| } |
| - int result = GetAvailableRange(offset, len, start); |
| + int result = InternalGetAvailableRange(offset, len, start); |
| if (net_log_.IsCapturing()) { |
| net_log_.EndEvent( |
| net::NetLog::TYPE_SPARSE_GET_RANGE, |
| @@ -270,8 +254,8 @@ int MemEntryImpl::GetAvailableRange(int64_t offset, |
| } |
| bool MemEntryImpl::CouldBeSparse() const { |
| - DCHECK_EQ(kParentEntry, type()); |
| - return (children_.get() != NULL); |
| + DCHECK_EQ(PARENT_ENTRY, type()); |
| + return (children_.get() != nullptr); |
| } |
| int MemEntryImpl::ReadyForSparseIO(const CompletionCallback& callback) { |
| @@ -280,41 +264,73 @@ int MemEntryImpl::ReadyForSparseIO(const CompletionCallback& callback) { |
| // ------------------------------------------------------------------------ |
| +MemEntryImpl::MemEntryImpl(MemBackendImpl* backend, |
| + const ::std::string& key, |
| + int child_id, |
| + MemEntryImpl* parent, |
| + net::NetLog* net_log) |
| + : key_(key), |
| + ref_count_(0), |
| + child_id_(child_id), |
| + child_first_pos_(0), |
| + parent_(parent), |
| + last_modified_(Time::Now()), |
| + last_used_(last_modified_), |
| + backend_(backend), |
| + doomed_(false) { |
| + backend_->OnEntryInserted(this); |
| + net_log_ = |
| + net::BoundNetLog::Make(net_log, net::NetLog::SOURCE_MEMORY_CACHE_ENTRY); |
| + net_log_.BeginEvent(net::NetLog::TYPE_DISK_CACHE_MEM_ENTRY_IMPL, |
| + base::Bind(&NetLogEntryCreationCallback, this)); |
| +} |
| + |
| MemEntryImpl::~MemEntryImpl() { |
| - for (int i = 0; i < NUM_STREAMS; i++) |
| - backend_->ModifyStorageSize(data_size_[i], 0); |
| - backend_->ModifyStorageSize(static_cast<int32_t>(key_.size()), 0); |
| + backend_->ModifyStorageSize(-GetStorageSize()); |
| + |
| + if (type() == PARENT_ENTRY) { |
| + if (children_) { |
| + EntryMap children; |
| + children_->swap(children); |
| + |
| + for (auto& it : children) { |
| + // Since |this| is stored in the map, it should be guarded against |
| + // double dooming, which will result in double destruction. |
| + if (it.second != this) |
| + it.second->Doom(); |
|
mmenke (2016/02/19 19:08:32):
So the problem was the child calling backend_->Mod
gavinp (2016/02/19 19:18:01):
No child should report a zero size. Nor is that ve
mmenke (2016/02/19 19:20:43):
Oh! For some reason I read it (twice) as "== 0",
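For context on the storage accounting discussed above: in this patch only the public parent constructor calls backend_->ModifyStorageSize(GetStorageSize()), a child starts out empty so it has nothing to add at creation, and the shared destructor subtracts GetStorageSize() for parent and child alike. The standalone sketch below is a rough model of that balance, not the real MemEntryImpl; MiniBackend and MiniEntry are illustrative names only, and only sizes are tracked, no payload bytes.

#include <cassert>
#include <string>
#include <vector>

// Illustrative stand-ins for the storage accounting in the patch.
struct MiniBackend {
  int total_storage = 0;
  void ModifyStorageSize(int delta) { total_storage += delta; }
};

struct MiniEntry {
  MiniBackend* backend;
  std::string key;                      // empty for a child entry
  std::vector<std::vector<char>> data;  // data streams

  // A parent reports its key size at creation; a child starts empty, so
  // there is nothing to add until data is written. The destructor undoes
  // whatever GetStorageSize() has grown to, for parent and child alike.
  MiniEntry(MiniBackend* b, const std::string& k)
      : backend(b), key(k), data(3) {
    backend->ModifyStorageSize(GetStorageSize());
  }
  ~MiniEntry() { backend->ModifyStorageSize(-GetStorageSize()); }

  int GetStorageSize() const {
    int size = static_cast<int>(key.size());
    for (const auto& stream : data)
      size += static_cast<int>(stream.size());
    return size;
  }

  // Same resize-and-report pattern as InternalWriteData (sizes only).
  void Write(int index, int offset, int len) {
    int old_size = static_cast<int>(data[index].size());
    if (old_size < offset + len)
      data[index].resize(offset + len);
    backend->ModifyStorageSize(static_cast<int>(data[index].size()) - old_size);
  }
};

int main() {
  MiniBackend backend;
  {
    MiniEntry parent(&backend, "http://example.com/");  // adds the key size
    parent.Write(0, 0, 100);                            // adds 100 bytes
    MiniEntry child(&backend, "");                      // adds nothing yet
    child.Write(0, 0, 50);                              // adds 50 bytes
    assert(backend.total_storage ==
           static_cast<int>(parent.key.size()) + 100 + 50);
  }
  assert(backend.total_storage == 0);  // destructors undo all accounting
  return 0;
}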
| + } |
| + } |
| + } else { |
| + parent_->children_->erase(child_id_); |
| + } |
| net_log_.EndEvent(net::NetLog::TYPE_DISK_CACHE_MEM_ENTRY_IMPL); |
| } |
| int MemEntryImpl::InternalReadData(int index, int offset, IOBuffer* buf, |
| int buf_len) { |
| - DCHECK(type() == kParentEntry || index == kSparseData); |
| + DCHECK(type() == PARENT_ENTRY || index == kSparseData); |
| - if (index < 0 || index >= NUM_STREAMS) |
| + if (index < 0 || index >= kNumStreams || buf_len < 0) |
| return net::ERR_INVALID_ARGUMENT; |
| - int entry_size = GetDataSize(index); |
| + int entry_size = data_[index].size(); |
| if (offset >= entry_size || offset < 0 || !buf_len) |
| return 0; |
| - if (buf_len < 0) |
| - return net::ERR_INVALID_ARGUMENT; |
| - |
| if (offset + buf_len > entry_size) |
| buf_len = entry_size - offset; |
| - UpdateRank(false); |
| - |
| - memcpy(buf->data(), &(data_[index])[offset], buf_len); |
| + UpdateStateOnUse(ENTRY_WAS_NOT_MODIFIED); |
| + std::copy(data_[index].begin() + offset, |
| + data_[index].begin() + offset + buf_len, buf->data()); |
| return buf_len; |
| } |
| int MemEntryImpl::InternalWriteData(int index, int offset, IOBuffer* buf, |
| int buf_len, bool truncate) { |
| - DCHECK(type() == kParentEntry || index == kSparseData); |
| + DCHECK(type() == PARENT_ENTRY || index == kSparseData); |
| - if (index < 0 || index >= NUM_STREAMS) |
| + if (index < 0 || index >= kNumStreams) |
| return net::ERR_INVALID_ARGUMENT; |
| if (offset < 0 || buf_len < 0) |
| @@ -328,34 +344,32 @@ int MemEntryImpl::InternalWriteData(int index, int offset, IOBuffer* buf, |
| return net::ERR_FAILED; |
| } |
| - // Read the size at this point. |
| - int entry_size = GetDataSize(index); |
| - |
| - PrepareTarget(index, offset, buf_len); |
| + int old_data_size = data_[index].size(); |
| + if (truncate || old_data_size < offset + buf_len) { |
| + data_[index].resize(offset + buf_len); |
| - if (entry_size < offset + buf_len) { |
| - backend_->ModifyStorageSize(entry_size, offset + buf_len); |
| - data_size_[index] = offset + buf_len; |
| - } else if (truncate) { |
| - if (entry_size > offset + buf_len) { |
| - backend_->ModifyStorageSize(entry_size, offset + buf_len); |
| - data_size_[index] = offset + buf_len; |
| + // Zero fill any hole. |
| + if (old_data_size < offset) { |
| + std::fill(data_[index].begin() + old_data_size, |
| + data_[index].begin() + offset, 0); |
| } |
| + |
| + backend_->ModifyStorageSize(data_[index].size() - old_data_size); |
mmenke (2016/02/19 19:36:53):
Hrm...Wait...So if we're a child entry, we are not
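For reference, the ModifyStorageSize() call questioned above runs inside whichever entry's stream actually grows: InternalWriteSparseData splits a write across children and delegates to each child's WriteData, so a child's sparse stream reports its own growth through this same InternalWriteData path. The rough standalone model below shows only that delegation and the size accounting; ToyBackend, ToySparseEntry, and kChildSize are illustrative names, no payload bytes are copied, and the child_first_pos_ bookkeeping from the patch is omitted.

#include <algorithm>
#include <cassert>
#include <map>
#include <vector>

// Illustrative stand-ins; sizes only, no payload bytes are copied.
struct ToyBackend {
  int total_storage = 0;
  void ModifyStorageSize(int delta) { total_storage += delta; }
};

class ToySparseEntry {
 public:
  explicit ToySparseEntry(ToyBackend* backend) : backend_(backend) {}

  // Parent-side entry point: split the range across children, as
  // InternalWriteSparseData does, and let each child report its own growth.
  void WriteSparseData(int offset, int len) {
    while (len > 0) {
      int child_index = offset / kChildSize;
      int child_offset = offset % kChildSize;
      int chunk = std::min(len, kChildSize - child_offset);
      WriteChildData(children_[child_index], child_offset, chunk);
      offset += chunk;
      len -= chunk;
    }
  }

 private:
  static constexpr int kChildSize = 1024;  // stand-in for kMaxSparseEntrySize

  // Child-side write: the same resize-and-report pattern as the
  // ModifyStorageSize() call in InternalWriteData above. resize()
  // zero-fills new bytes, mirroring the hole filling in the patch.
  void WriteChildData(std::vector<char>& child_stream, int offset, int len) {
    int old_size = static_cast<int>(child_stream.size());
    if (old_size < offset + len)
      child_stream.resize(offset + len);
    backend_->ModifyStorageSize(static_cast<int>(child_stream.size()) -
                                old_size);
  }

  ToyBackend* backend_;
  std::map<int, std::vector<char>> children_;
};

int main() {
  ToyBackend backend;
  ToySparseEntry entry(&backend);
  entry.WriteSparseData(0, 1100);  // spans two children: 1024 + 76 bytes
  assert(backend.total_storage == 1100);
  entry.WriteSparseData(0, 1100);  // rewriting the same range adds nothing
  assert(backend.total_storage == 1100);
  return 0;
}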
| } |
| - UpdateRank(true); |
| + UpdateStateOnUse(ENTRY_WAS_MODIFIED); |
| if (!buf_len) |
| return 0; |
| - memcpy(&(data_[index])[offset], buf->data(), buf_len); |
| + std::copy(buf->data(), buf->data() + buf_len, data_[index].begin() + offset); |
| return buf_len; |
| } |
| int MemEntryImpl::InternalReadSparseData(int64_t offset, |
| IOBuffer* buf, |
| int buf_len) { |
| - DCHECK(type() == kParentEntry); |
| + DCHECK_EQ(PARENT_ENTRY, type()); |
| if (!InitSparseInfo()) |
| return net::ERR_CACHE_OPERATION_NOT_SUPPORTED; |
| @@ -369,7 +383,7 @@ int MemEntryImpl::InternalReadSparseData(int64_t offset, |
| // Iterate until we have read enough. |
| while (io_buf->BytesRemaining()) { |
| - MemEntryImpl* child = OpenChild(offset + io_buf->BytesConsumed(), false); |
| + MemEntryImpl* child = GetChild(offset + io_buf->BytesConsumed(), false); |
| // No child present for that offset. |
| if (!child) |
| @@ -385,7 +399,7 @@ int MemEntryImpl::InternalReadSparseData(int64_t offset, |
| if (net_log_.IsCapturing()) { |
| net_log_.BeginEvent( |
| net::NetLog::TYPE_SPARSE_READ_CHILD_DATA, |
| - CreateNetLogSparseReadWriteCallback(child->net_log().source(), |
| + CreateNetLogSparseReadWriteCallback(child->net_log_.source(), |
| io_buf->BytesRemaining())); |
| } |
| int ret = child->ReadData(kSparseData, child_offset, io_buf.get(), |
| @@ -405,15 +419,14 @@ int MemEntryImpl::InternalReadSparseData(int64_t offset, |
| io_buf->DidConsume(ret); |
| } |
| - UpdateRank(false); |
| - |
| + UpdateStateOnUse(ENTRY_WAS_NOT_MODIFIED); |
| return io_buf->BytesConsumed(); |
| } |
| int MemEntryImpl::InternalWriteSparseData(int64_t offset, |
| IOBuffer* buf, |
| int buf_len) { |
| - DCHECK(type() == kParentEntry); |
| + DCHECK_EQ(PARENT_ENTRY, type()); |
| if (!InitSparseInfo()) |
| return net::ERR_CACHE_OPERATION_NOT_SUPPORTED; |
| @@ -429,7 +442,7 @@ int MemEntryImpl::InternalWriteSparseData(int64_t offset, |
| // child entry until all |buf_len| bytes are written. The write operation can |
| // start in the middle of an entry. |
| while (io_buf->BytesRemaining()) { |
| - MemEntryImpl* child = OpenChild(offset + io_buf->BytesConsumed(), true); |
| + MemEntryImpl* child = GetChild(offset + io_buf->BytesConsumed(), true); |
| int child_offset = ToChildOffset(offset + io_buf->BytesConsumed()); |
| // Find the right amount to write, this evaluates the remaining bytes to |
| @@ -441,10 +454,9 @@ int MemEntryImpl::InternalWriteSparseData(int64_t offset, |
| int data_size = child->GetDataSize(kSparseData); |
| if (net_log_.IsCapturing()) { |
| - net_log_.BeginEvent( |
| - net::NetLog::TYPE_SPARSE_WRITE_CHILD_DATA, |
| - CreateNetLogSparseReadWriteCallback(child->net_log().source(), |
| - write_len)); |
| + net_log_.BeginEvent(net::NetLog::TYPE_SPARSE_WRITE_CHILD_DATA, |
| + CreateNetLogSparseReadWriteCallback( |
| + child->net_log_.source(), write_len)); |
| } |
| // Always writes to the child entry. This operation may overwrite data |
| @@ -472,13 +484,14 @@ int MemEntryImpl::InternalWriteSparseData(int64_t offset, |
| io_buf->DidConsume(ret); |
| } |
| - UpdateRank(true); |
| - |
| + UpdateStateOnUse(ENTRY_WAS_MODIFIED); |
| return io_buf->BytesConsumed(); |
| } |
| -int MemEntryImpl::GetAvailableRange(int64_t offset, int len, int64_t* start) { |
| - DCHECK(type() == kParentEntry); |
| +int MemEntryImpl::InternalGetAvailableRange(int64_t offset, |
| + int len, |
| + int64_t* start) { |
| + DCHECK_EQ(PARENT_ENTRY, type()); |
| DCHECK(start); |
| if (!InitSparseInfo()) |
| @@ -487,7 +500,7 @@ int MemEntryImpl::GetAvailableRange(int64_t offset, int len, int64_t* start) { |
| if (offset < 0 || len < 0 || !start) |
| return net::ERR_INVALID_ARGUMENT; |
| - MemEntryImpl* current_child = NULL; |
| + MemEntryImpl* current_child = nullptr; |
| // Find the first child and record the number of empty bytes. |
| int empty = FindNextChild(offset, len, ¤t_child); |
| @@ -521,38 +534,10 @@ int MemEntryImpl::GetAvailableRange(int64_t offset, int len, int64_t* start) { |
| return 0; |
| } |
| -void MemEntryImpl::PrepareTarget(int index, int offset, int buf_len) { |
| - int entry_size = GetDataSize(index); |
| - |
| - if (entry_size >= offset + buf_len) |
| - return; // Not growing the stored data. |
| - |
| - if (static_cast<int>(data_[index].size()) < offset + buf_len) |
| - data_[index].resize(offset + buf_len); |
| - |
| - if (offset <= entry_size) |
| - return; // There is no "hole" on the stored data. |
| - |
| - // Cleanup the hole not written by the user. The point is to avoid returning |
| - // random stuff later on. |
| - memset(&(data_[index])[entry_size], 0, offset - entry_size); |
| -} |
| - |
| -void MemEntryImpl::UpdateRank(bool modified) { |
| - Time current = Time::Now(); |
| - last_used_ = current; |
| - |
| - if (modified) |
| - last_modified_ = current; |
| - |
| - if (!doomed_) |
| - backend_->UpdateRank(this); |
| -} |
| - |
| bool MemEntryImpl::InitSparseInfo() { |
| - DCHECK(type() == kParentEntry); |
| + DCHECK_EQ(PARENT_ENTRY, type()); |
| - if (!children_.get()) { |
| + if (!children_) { |
| // If we already have some data in sparse stream but we are being |
| // initialized as a sparse entry, we should fail. |
| if (GetDataSize(kSparseData)) |
| @@ -566,52 +551,27 @@ bool MemEntryImpl::InitSparseInfo() { |
| return true; |
| } |
| -bool MemEntryImpl::InitChildEntry(MemEntryImpl* parent, int child_id, |
| - net::NetLog* net_log) { |
| - DCHECK(!parent_); |
| - DCHECK(!child_id_); |
| - |
| - net_log_ = net::BoundNetLog::Make(net_log, |
| - net::NetLog::SOURCE_MEMORY_CACHE_ENTRY); |
| - net_log_.BeginEvent( |
| - net::NetLog::TYPE_DISK_CACHE_MEM_ENTRY_IMPL, |
| - base::Bind(&NetLogChildEntryCreationCallback, parent, child_id_)); |
| - |
| - parent_ = parent; |
| - child_id_ = child_id; |
| - Time current = Time::Now(); |
| - last_modified_ = current; |
| - last_used_ = current; |
| - // Insert this to the backend's ranking list. |
| - backend_->InsertIntoRankingList(this); |
| - return true; |
| -} |
| - |
| -MemEntryImpl* MemEntryImpl::OpenChild(int64_t offset, bool create) { |
| - DCHECK(type() == kParentEntry); |
| +MemEntryImpl* MemEntryImpl::GetChild(int64_t offset, bool create) { |
| + DCHECK_EQ(PARENT_ENTRY, type()); |
| int index = ToChildIndex(offset); |
| EntryMap::iterator i = children_->find(index); |
| - if (i != children_->end()) { |
| + if (i != children_->end()) |
| return i->second; |
| - } else if (create) { |
| - MemEntryImpl* child = new MemEntryImpl(backend_); |
| - child->InitChildEntry(this, index, net_log_.net_log()); |
| - (*children_)[index] = child; |
| - return child; |
| - } |
| - return NULL; |
| + if (create) |
| + return new MemEntryImpl(backend_, index, this, net_log_.net_log()); |
| + return nullptr; |
| } |
| int MemEntryImpl::FindNextChild(int64_t offset, int len, MemEntryImpl** child) { |
| DCHECK(child); |
| - *child = NULL; |
| + *child = nullptr; |
| int scanned_len = 0; |
| // This loop tries to find the first existing child. |
| while (scanned_len < len) { |
| // This points to the current offset in the child. |
| int current_child_offset = ToChildOffset(offset + scanned_len); |
| - MemEntryImpl* current_child = OpenChild(offset + scanned_len, false); |
| + MemEntryImpl* current_child = GetChild(offset + scanned_len, false); |
| if (current_child) { |
| int child_first_pos = current_child->child_first_pos_; |
| @@ -634,8 +594,4 @@ int MemEntryImpl::FindNextChild(int64_t offset, int len, MemEntryImpl** child) { |
| return scanned_len; |
| } |
| -void MemEntryImpl::DetachChild(int child_id) { |
| - children_->erase(child_id); |
| -} |
| - |
| } // namespace disk_cache |