Index: storage/browser/blob/blob_storage_context.cc |
diff --git a/storage/browser/blob/blob_storage_context.cc b/storage/browser/blob/blob_storage_context.cc |
index 45c4f8c5a8a273dbc77f41b57225f1049617e5c0..caf01df0857cc6ae09e47f73f062fd30906391fe 100644 |
--- a/storage/browser/blob/blob_storage_context.cc |
+++ b/storage/browser/blob/blob_storage_context.cc |
@@ -95,8 +95,8 @@ scoped_ptr<BlobDataHandle> BlobStorageContext::AddFinishedBlob( |
BlobDataBuilder* external_builder) { |
TRACE_EVENT0("Blob", "Context::AddFinishedBlob"); |
StartBuildingBlob(external_builder->uuid_); |
+ DCHECK_EQ(1U, blob_map_.count(external_builder->uuid_)); |
BlobMap::iterator found = blob_map_.find(external_builder->uuid_); |
- DCHECK(found != blob_map_.end()); |
BlobMapEntry* entry = found->second; |
InternalBlobData::Builder* target_blob_builder = entry->data_builder.get(); |
DCHECK(target_blob_builder); |
@@ -283,6 +283,9 @@ scoped_refptr<BlobDataItem> BlobStorageContext::AllocateBlobItem( |
ipc_data.length()); |
blob_item = new BlobDataItem(element.Pass()); |
break; |
+ case DataElement::TYPE_DISK_CACHE_ENTRY: |
+      NOTREACHED();  // XYZZY figure this out |
+ break; |
default: |
NOTREACHED(); |
break; |
@@ -310,12 +313,11 @@ bool BlobStorageContext::AppendAllocatedBlobItem( |
// offset or size) are shared between the blobs. Otherwise, the relevant |
// portion of the item is copied. |
- const DataElement& data_element = blob_item->data_element(); |
- uint64 length = data_element.length(); |
- uint64 offset = data_element.offset(); |
+ uint64 length = blob_item->GetLength(); |
+ uint64 offset = blob_item->GetOffset(); |
UMA_HISTOGRAM_COUNTS("Storage.Blob.StorageSizeBeforeAppend", |
memory_usage_ / 1024); |
- switch (data_element.type()) { |
+ switch (blob_item->data_element().type()) { |
case DataElement::TYPE_BYTES: |
UMA_HISTOGRAM_COUNTS("Storage.BlobItemSize.Bytes", length / 1024); |
DCHECK(!offset); |
@@ -355,16 +357,23 @@ bool BlobStorageContext::AppendAllocatedBlobItem( |
(length - offset) / 1024); |
// We grab the handle to ensure it stays around while we copy it. |
scoped_ptr<BlobDataHandle> src = |
- GetBlobDataFromUUID(data_element.blob_uuid()); |
+ GetBlobDataFromUUID(blob_item->data_element().blob_uuid()); |
if (src) { |
BlobMapEntry* other_entry = |
- blob_map_.find(data_element.blob_uuid())->second; |
+ blob_map_.find(blob_item->data_element().blob_uuid())->second; |
DCHECK(other_entry->data); |
exceeded_memory = !AppendBlob(target_blob_uuid, *other_entry->data, |
offset, length, target_blob_builder); |
} |
break; |
} |
+ case DataElement::TYPE_DISK_CACHE_ENTRY: { |
+ UMA_HISTOGRAM_COUNTS("Storage.BlobItemSize.CacheEntry", |
+ (length - offset) / 1024); |
+ target_blob_builder->AppendSharedBlobItem( |
+ new ShareableBlobDataItem(target_blob_uuid, blob_item)); |
+ break; |
+ } |
default: |
NOTREACHED(); |
break; |
@@ -452,6 +461,9 @@ bool BlobStorageContext::AppendBlob( |
target_blob_builder->AppendSharedBlobItem(new ShareableBlobDataItem( |
target_blob_uuid, new BlobDataItem(element.Pass()))); |
} break; |
+ case DataElement::TYPE_DISK_CACHE_ENTRY: |
+ CHECK(false) << "Can't append one of these yo"; |
[review comment — michaeln, 2015/06/12 22:35:09]
what's to prevent this?
cache.match(x).then(funct… [remainder of comment truncated in extraction]
 |
+ |
default: |
CHECK(false) << "Illegal blob item type: " << item.type(); |
} |