Chromium Code Reviews

Unified Diff: storage/browser/blob/blob_storage_context.cc

Issue 1108083002: Create blobs from Disk Cache entries. (Closed) Base URL: https://chromium.googlesource.com/chromium/src.git@master
Patch Set: fix unit tests build... Created 5 years, 7 months ago
Index: storage/browser/blob/blob_storage_context.cc
diff --git a/storage/browser/blob/blob_storage_context.cc b/storage/browser/blob/blob_storage_context.cc
index e2cf25ff2675f86db16c3c29ef8fbce0ca7a635f..050dd0dfcebe28f647043e48cad6349b44d8ebc0 100644
--- a/storage/browser/blob/blob_storage_context.cc
+++ b/storage/browser/blob/blob_storage_context.cc
@@ -283,6 +283,9 @@ scoped_refptr<BlobDataItem> BlobStorageContext::AllocateBlobItem(
                               ipc_data.length());
       blob_item = new BlobDataItem(element.Pass());
       break;
+    case DataElement::TYPE_DISK_CACHE_ENTRY:  // This type can't be sent by IPC.
+      NOTREACHED();
+      break;
     default:
       NOTREACHED();
       break;
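
The new case in AllocateBlobItem() documents that TYPE_DISK_CACHE_ENTRY never arrives over IPC: a disk_cache::Entry* is only meaningful inside the browser process, so the renderer has no way to describe one. A minimal, self-contained sketch of that constraint, using hypothetical stand-in names rather than the real Chromium types:

// Hypothetical stand-in for DataElement::Type; only the last value is new.
enum class ElementType { kBytes, kFile, kBlob, kFileSystemFile, kDiskCacheEntry };

// A renderer can only describe serializable data sources in an IPC message;
// a raw cache entry pointer is browser-process-only, which is why the
// allocation path above hits NOTREACHED() if it ever sees one.
constexpr bool IsAllowedOverIpc(ElementType type) {
  return type != ElementType::kDiskCacheEntry;
}

static_assert(IsAllowedOverIpc(ElementType::kBytes),
              "byte elements may cross IPC");
static_assert(!IsAllowedOverIpc(ElementType::kDiskCacheEntry),
              "disk cache entries are created in the browser process only");
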
@@ -309,13 +312,14 @@ bool BlobStorageContext::AppendAllocatedBlobItem(
   // Internal items that are fully used by the new blob (not cut by the
   // offset or size) are shared between the blobs. Otherwise, the relevant
   // portion of the item is copied.
+  // 5) The Disk Cache entry is denoted by the disk_cache::Entry* and the
+  //    stream number. The entire stream is used.
-  const DataElement& data_element = blob_item->data_element();
-  uint64 length = data_element.length();
-  uint64 offset = data_element.offset();
+  uint64 length = blob_item->GetLength();
+  uint64 offset = blob_item->GetOffset();
   UMA_HISTOGRAM_COUNTS("Storage.Blob.StorageSizeBeforeAppend",
                        memory_usage_ / 1024);
-  switch (data_element.type()) {
+  switch (blob_item->data_element().type()) {
     case DataElement::TYPE_BYTES:
       UMA_HISTOGRAM_COUNTS("Storage.BlobItemSize.Bytes", length / 1024);
       DCHECK(!offset);
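
The hunk above also switches from reading length and offset off the raw DataElement to the blob_item->GetLength() / GetOffset() accessors. A plausible reason (an assumption here; the accessor itself is not shown in this diff) is that a disk-cache-backed item takes its size from the cache entry's stream rather than from the serialized element. A sketch of what such an accessor could look like, using stub types instead of BlobDataItem and disk_cache::Entry:

#include <cstdint>

// Stand-ins for DataElement and the relevant slice of disk_cache::Entry.
struct StubElement {
  uint64_t length = 0;
  uint64_t offset = 0;
};

struct StubCacheEntry {
  int32_t stream_size = 0;
  int32_t GetDataSize(int /*stream_index*/) const { return stream_size; }
};

// Hypothetical item: when a cache entry is attached, the length comes from
// the entry's stream; otherwise it comes from the serialized element. Callers
// like AppendAllocatedBlobItem() then no longer care which kind they hold.
class StubBlobDataItem {
 public:
  StubBlobDataItem(StubElement element,
                   const StubCacheEntry* entry,
                   int stream_index)
      : element_(element), entry_(entry), stream_index_(stream_index) {}

  uint64_t GetLength() const {
    if (entry_)
      return static_cast<uint64_t>(entry_->GetDataSize(stream_index_));
    return element_.length;
  }
  uint64_t GetOffset() const { return element_.offset; }

 private:
  StubElement element_;
  const StubCacheEntry* entry_;
  int stream_index_;
};
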
@@ -355,16 +359,23 @@ bool BlobStorageContext::AppendAllocatedBlobItem(
                            (length - offset) / 1024);
       // We grab the handle to ensure it stays around while we copy it.
       scoped_ptr<BlobDataHandle> src =
-          GetBlobDataFromUUID(data_element.blob_uuid());
+          GetBlobDataFromUUID(blob_item->data_element().blob_uuid());
       if (src) {
         BlobMapEntry* other_entry =
-            blob_map_.find(data_element.blob_uuid())->second;
+            blob_map_.find(blob_item->data_element().blob_uuid())->second;
         DCHECK(other_entry->data);
         exceeded_memory = !AppendBlob(target_blob_uuid, *other_entry->data,
                                       offset, length, target_blob_builder);
       }
       break;
     }
+    case DataElement::TYPE_DISK_CACHE_ENTRY: {
+      UMA_HISTOGRAM_COUNTS("Storage.BlobItemSize.CacheEntry",
+                           (length - offset) / 1024);
+      target_blob_builder->AppendSharedBlobItem(
+          new ShareableBlobDataItem(target_blob_uuid, blob_item));
+      break;
+    }
     default:
       NOTREACHED();
       break;
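
For TYPE_DISK_CACHE_ENTRY the item is wrapped in a ShareableBlobDataItem and appended whole: there is no offset/length slicing and no copy into blob memory, matching the new comment that the entire stream is used. A rough sketch of the sharing idea with simplified, hypothetical types (the real ShareableBlobDataItem tracks which blob UUIDs reference an item):

#include <set>
#include <string>

// Simplified model of an item that several blobs may reference at once.
struct SharedItem {
  std::set<std::string> referencing_blobs;  // UUIDs of blobs holding a reference
};

// Appending the same underlying item to another blob just records one more
// referencing UUID instead of duplicating the backing data.
void AppendToBlob(SharedItem* item, const std::string& blob_uuid) {
  item->referencing_blobs.insert(blob_uuid);
}

// The backing data (conceptually, the disk cache stream) can only be released
// once no blob references the item any more.
bool CanRelease(const SharedItem& item) {
  return item.referencing_blobs.empty();
}
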
@@ -403,7 +414,10 @@ bool BlobStorageContext::AppendBlob(
     uint64_t current_length = item_length - offset;
     uint64_t new_length = current_length > length ? length : current_length;
-    bool reusing_blob_item = offset == 0 && new_length == item_length;
+    // Always reuse disk cache entries, as they never have an offset or length.
dmurph 2015/05/29 19:13:43 Ah, this might be a problem. If I try to splice a
gavinp 2015/06/04 18:40:00 OK, I went through this, and I think I get it. I w
+    bool reusing_blob_item =
+        item.type() == DataElement::TYPE_DISK_CACHE_ENTRY ||
+        (offset == 0 && new_length == item_length);
     UMA_HISTOGRAM_BOOLEAN("Storage.Blob.ReusedItem", reusing_blob_item);
     if (reusing_blob_item) {
       shareable_item->referencing_blobs().insert(target_blob_uuid);
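
The reuse check in AppendBlob() now treats disk cache items as always reusable, which is exactly what the inline discussion above is probing: a spliced (offset/length) reference to such an item would still share the whole stream rather than a sub-range. A small runnable sketch of just that predicate, with stand-in names for the Chromium types:

#include <cstdint>
#include <iostream>

enum class ItemType { kBytes, kFile, kBlob, kDiskCacheEntry };

// Mirrors the reusing_blob_item expression above: share the existing item
// wholesale when it is a disk cache entry, or when the requested slice spans
// the entire item anyway.
bool ShouldReuseItem(ItemType type,
                     uint64_t offset,
                     uint64_t new_length,
                     uint64_t item_length) {
  return type == ItemType::kDiskCacheEntry ||
         (offset == 0 && new_length == item_length);
}

int main() {
  // Full-range byte item: reused (prints 1).
  std::cout << ShouldReuseItem(ItemType::kBytes, 0, 100, 100) << "\n";
  // Sliced byte item: copied instead (prints 0).
  std::cout << ShouldReuseItem(ItemType::kBytes, 10, 50, 100) << "\n";
  // Disk cache entry asked for a sub-range: still reused whole (prints 1),
  // the case the review comments above are discussing.
  std::cout << ShouldReuseItem(ItemType::kDiskCacheEntry, 10, 50, 100) << "\n";
  return 0;
}
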
