Index: storage/browser/blob/blob_storage_context.cc |
diff --git a/storage/browser/blob/blob_storage_context.cc b/storage/browser/blob/blob_storage_context.cc |
index e2cf25ff2675f86db16c3c29ef8fbce0ca7a635f..cbfe2bbe671c06003fc020c0db4f1827fdde5f04 100644 |
--- a/storage/browser/blob/blob_storage_context.cc |
+++ b/storage/browser/blob/blob_storage_context.cc |
@@ -95,8 +95,8 @@ scoped_ptr<BlobDataHandle> BlobStorageContext::AddFinishedBlob( |
BlobDataBuilder* external_builder) { |
TRACE_EVENT0("Blob", "Context::AddFinishedBlob"); |
StartBuildingBlob(external_builder->uuid_); |
+ DCHECK_EQ(1U, blob_map_.count(external_builder->uuid_)); |
jkarlin
2015/05/29 14:59:41
Why this change?
gavinp
2015/05/29 18:06:07
I don't like doing DCHECK with == or != in them; it's better to use DCHECK_EQ/DCHECK_NE, which print both operand values when the check fails.
|
BlobMap::iterator found = blob_map_.find(external_builder->uuid_); |
- DCHECK(found != blob_map_.end()); |
BlobMapEntry* entry = found->second; |
InternalBlobData::Builder* target_blob_builder = entry->data_builder.get(); |
DCHECK(target_blob_builder); |
@@ -283,6 +283,7 @@ scoped_refptr<BlobDataItem> BlobStorageContext::AllocateBlobItem( |
ipc_data.length()); |
blob_item = new BlobDataItem(element.Pass()); |
break; |
+ case DataElement::TYPE_DISK_CACHE_ENTRY: // This type can't be sent by IPC. |
jkarlin
2015/05/29 14:59:42
Add NOTREACHED().
gavinp
2015/05/29 18:06:07
Done.
|
default: |
NOTREACHED(); |
break; |
@@ -310,12 +311,11 @@ bool BlobStorageContext::AppendAllocatedBlobItem( |
// offset or size) are shared between the blobs. Otherwise, the relevant |
// portion of the item is copied. |
- const DataElement& data_element = blob_item->data_element(); |
- uint64 length = data_element.length(); |
- uint64 offset = data_element.offset(); |
+ uint64 length = blob_item->GetLength(); |
+ uint64 offset = blob_item->GetOffset(); |
UMA_HISTOGRAM_COUNTS("Storage.Blob.StorageSizeBeforeAppend", |
memory_usage_ / 1024); |
- switch (data_element.type()) { |
+ switch (blob_item->data_element().type()) { |
case DataElement::TYPE_BYTES: |
UMA_HISTOGRAM_COUNTS("Storage.BlobItemSize.Bytes", length / 1024); |
DCHECK(!offset); |
@@ -355,16 +355,23 @@ bool BlobStorageContext::AppendAllocatedBlobItem( |
(length - offset) / 1024); |
// We grab the handle to ensure it stays around while we copy it. |
scoped_ptr<BlobDataHandle> src = |
- GetBlobDataFromUUID(data_element.blob_uuid()); |
+ GetBlobDataFromUUID(blob_item->data_element().blob_uuid()); |
if (src) { |
BlobMapEntry* other_entry = |
- blob_map_.find(data_element.blob_uuid())->second; |
+ blob_map_.find(blob_item->data_element().blob_uuid())->second; |
DCHECK(other_entry->data); |
exceeded_memory = !AppendBlob(target_blob_uuid, *other_entry->data, |
offset, length, target_blob_builder); |
} |
break; |
} |
+ case DataElement::TYPE_DISK_CACHE_ENTRY: { |
+ UMA_HISTOGRAM_COUNTS("Storage.BlobItemSize.CacheEntry", |
+ (length - offset) / 1024); |
+ target_blob_builder->AppendSharedBlobItem( |
+ new ShareableBlobDataItem(target_blob_uuid, blob_item)); |
+ break; |
+ } |
default: |
NOTREACHED(); |
break; |
@@ -403,7 +410,10 @@ bool BlobStorageContext::AppendBlob( |
uint64_t current_length = item_length - offset; |
uint64_t new_length = current_length > length ? length : current_length; |
- bool reusing_blob_item = offset == 0 && new_length == item_length; |
+ // Always reuse disk cache entries, as they never have an offset or length. |
+ bool reusing_blob_item = |
+ item.type() == DataElement::TYPE_DISK_CACHE_ENTRY || |
+ (offset == 0 && new_length == item_length); |
UMA_HISTOGRAM_BOOLEAN("Storage.Blob.ReusedItem", reusing_blob_item); |
if (reusing_blob_item) { |
shareable_item->referencing_blobs().insert(target_blob_uuid); |