Index: storage/browser/blob/blob_storage_context.cc |
diff --git a/storage/browser/blob/blob_storage_context.cc b/storage/browser/blob/blob_storage_context.cc |
index 1577b6bd9b95940ef09c39fbf40fed9a10b9602a..c75054850ef43eb70939fb7edeaf0278d983eb7d 100644 |
--- a/storage/browser/blob/blob_storage_context.cc |
+++ b/storage/browser/blob/blob_storage_context.cc |
@@ -4,13 +4,17 @@ |
#include "storage/browser/blob/blob_storage_context.h" |
+#include <algorithm> |
+ |
#include "base/bind.h" |
#include "base/location.h" |
#include "base/logging.h" |
+#include "base/memory/scoped_ptr.h" |
#include "base/message_loop/message_loop_proxy.h" |
#include "base/metrics/histogram.h" |
#include "base/stl_util.h" |
#include "storage/browser/blob/blob_data_builder.h" |
+#include "storage/browser/blob/blob_data_handle.h" |
#include "url/gurl.h" |
namespace storage { |
@@ -39,12 +43,11 @@ static const int64 kMaxMemoryUsage = 500 * 1024 * 1024; // Half a gig. |
} // namespace |
-BlobStorageContext::BlobMapEntry::BlobMapEntry() |
- : refcount(0), flags(0) { |
+BlobStorageContext::BlobMapEntry::BlobMapEntry() : refcount(0), flags(0) { |
} |
BlobStorageContext::BlobMapEntry::BlobMapEntry(int refcount, |
- BlobDataBuilder* data) |
+ InternalBlobData::Builder* data) |
: refcount(refcount), flags(0), data_builder(data) { |
} |
@@ -55,8 +58,7 @@ bool BlobStorageContext::BlobMapEntry::IsBeingBuilt() { |
return data_builder; |
} |
-BlobStorageContext::BlobStorageContext() |
- : memory_usage_(0) { |
+BlobStorageContext::BlobStorageContext() : memory_usage_(0) { |
} |
BlobStorageContext::~BlobStorageContext() { |
@@ -80,26 +82,40 @@ scoped_ptr<BlobDataHandle> BlobStorageContext::GetBlobDataFromUUID( |
scoped_ptr<BlobDataHandle> BlobStorageContext::GetBlobDataFromPublicURL( |
const GURL& url) { |
- BlobURLMap::iterator found = public_blob_urls_.find( |
- BlobUrlHasRef(url) ? ClearBlobUrlRef(url) : url); |
+ BlobURLMap::iterator found = |
+ public_blob_urls_.find(BlobUrlHasRef(url) ? ClearBlobUrlRef(url) : url); |
if (found == public_blob_urls_.end()) |
return scoped_ptr<BlobDataHandle>(); |
return GetBlobDataFromUUID(found->second); |
} |
scoped_ptr<BlobDataHandle> BlobStorageContext::AddFinishedBlob( |
- const BlobDataBuilder& data) { |
- StartBuildingBlob(data.uuid_); |
- for (const auto& blob_item : data.items_) |
- AppendBlobDataItem(data.uuid_, *(blob_item->item_)); |
- FinishBuildingBlob(data.uuid_, data.content_type_); |
- scoped_ptr<BlobDataHandle> handle = GetBlobDataFromUUID(data.uuid_); |
- DecrementBlobRefCount(data.uuid_); |
+ BlobDataBuilder* builder) { |
+ StartBuildingBlob(builder->uuid_); |
+ BlobMap::iterator found = blob_map_.find(builder->uuid_); |
+ DCHECK(found != blob_map_.end()); |
+ BlobMapEntry* entry = found->second; |
+ InternalBlobData::Builder* target_blob_builder = entry->data_builder.get(); |
+ DCHECK(target_blob_builder); |
+ |
+ for (const auto& blob_item : builder->items_) { |
+ if (entry->flags & EXCEEDED_MEMORY) |
michaeln
2015/02/05 22:58:47
not sure this test is needed since StartBuildingBlob was just called above and creates a fresh entry, so EXCEEDED_MEMORY cannot be set yet.
dmurph
2015/02/06 01:32:30
True, removed.
|
+ break; |
+ if (!AppendAllocatedBlobItem(builder->uuid_, target_blob_builder, |
+ blob_item)) { |
+ BlobEntryExceededMemory(entry); |
+ break; |
+ } |
+ } |
+ |
+ FinishBuildingBlob(builder->uuid_, builder->content_type_); |
+ scoped_ptr<BlobDataHandle> handle = GetBlobDataFromUUID(builder->uuid_); |
+ DecrementBlobRefCount(builder->uuid_); |
return handle.Pass(); |
} |
-bool BlobStorageContext::RegisterPublicBlobURL( |
- const GURL& blob_url, const std::string& uuid) { |
+bool BlobStorageContext::RegisterPublicBlobURL(const GURL& blob_url, |
+ const std::string& uuid) { |
DCHECK(!BlobUrlHasRef(blob_url)); |
DCHECK(IsInUse(uuid)); |
DCHECK(!IsUrlRegistered(blob_url)); |
@@ -123,20 +139,21 @@ scoped_ptr<BlobDataSnapshot> BlobStorageContext::CreateSnapshot( |
scoped_ptr<BlobDataSnapshot> result; |
auto found = blob_map_.find(uuid); |
DCHECK(found != blob_map_.end()) |
- << "Blob should be in map, as the handle is still around"; |
+ << "Blob " << uuid << " should be in map, as the handle is still around"; |
BlobMapEntry* entry = found->second; |
DCHECK(!entry->IsBeingBuilt()); |
- result.reset(new BlobDataSnapshot(*entry->data)); |
- return result.Pass(); |
+ const InternalBlobData& data = *entry->data; |
+ return CreateSnapshot(uuid, data); |
} |
void BlobStorageContext::StartBuildingBlob(const std::string& uuid) { |
DCHECK(!IsInUse(uuid) && !uuid.empty()); |
- blob_map_[uuid] = new BlobMapEntry(1, new BlobDataBuilder(uuid)); |
+ blob_map_[uuid] = new BlobMapEntry(1, new InternalBlobData::Builder()); |
} |
-void BlobStorageContext::AppendBlobDataItem(const std::string& uuid, |
- const DataElement& item) { |
+void BlobStorageContext::AppendBlobDataItem( |
+ const std::string& uuid, |
+ const storage::DataElement& data_element) { |
DCHECK(IsBeingBuilt(uuid)); |
BlobMap::iterator found = blob_map_.find(uuid); |
if (found == blob_map_.end()) |
@@ -144,84 +161,30 @@ void BlobStorageContext::AppendBlobDataItem(const std::string& uuid, |
BlobMapEntry* entry = found->second; |
if (entry->flags & EXCEEDED_MEMORY) |
return; |
- BlobDataBuilder* target_blob_data = entry->data_builder.get(); |
- DCHECK(target_blob_data); |
- |
- bool exceeded_memory = false; |
- |
- // The blob data is stored in the canonical way which only contains a |
- // list of Data, File, and FileSystem items. Aggregated TYPE_BLOB items |
- // are expanded into the primitive constituent types. |
- // 1) The Data item is denoted by the raw data and length. |
- // 2) The File item is denoted by the file path, the range and the expected |
- // modification time. |
- // 3) The FileSystem File item is denoted by the FileSystem URL, the range |
- // and the expected modification time. |
- // 4) The Blob items are expanded. |
- // TODO(michaeln): Would be nice to avoid copying Data items when expanding. |
+ InternalBlobData::Builder* target_blob_builder = entry->data_builder.get(); |
+ DCHECK(target_blob_builder); |
- uint64 length = item.length(); |
- DCHECK_GT(length, 0u); |
- UMA_HISTOGRAM_COUNTS("Storage.Blob.StorageSizeBeforeAppend", |
- memory_usage_ / 1024); |
- switch (item.type()) { |
- case DataElement::TYPE_BYTES: |
- UMA_HISTOGRAM_COUNTS("Storage.BlobItemSize.Bytes", length / 1024); |
- DCHECK(!item.offset()); |
- exceeded_memory = !AppendBytesItem(target_blob_data, item.bytes(), |
- static_cast<int64>(length)); |
- break; |
- case DataElement::TYPE_FILE: |
- UMA_HISTOGRAM_COUNTS("Storage.BlobItemSize.File", length / 1024); |
- AppendFileItem(target_blob_data, item.path(), item.offset(), |
- item.length(), item.expected_modification_time()); |
- break; |
- case DataElement::TYPE_FILE_FILESYSTEM: |
- UMA_HISTOGRAM_COUNTS("Storage.BlobItemSize.FileSystem", length / 1024); |
- AppendFileSystemFileItem(target_blob_data, item.filesystem_url(), |
- item.offset(), item.length(), |
- item.expected_modification_time()); |
- break; |
- case DataElement::TYPE_BLOB: { |
- UMA_HISTOGRAM_COUNTS("Storage.BlobItemSize.Blob", length / 1024); |
- // We grab the handle to ensure it stays around while we copy it. |
- scoped_ptr<BlobDataHandle> src = GetBlobDataFromUUID(item.blob_uuid()); |
- if (src) { |
- BlobMapEntry* entry = blob_map_.find(item.blob_uuid())->second; |
- DCHECK(entry->data); |
- exceeded_memory = !ExpandStorageItems(target_blob_data, *entry->data, |
- item.offset(), item.length()); |
- } |
- break; |
- } |
- default: |
- NOTREACHED(); |
- break; |
- } |
- UMA_HISTOGRAM_COUNTS("Storage.Blob.StorageSizeAfterAppend", |
- memory_usage_ / 1024); |
- |
- // If we're using too much memory, drop this blob's data. |
- // TODO(michaeln): Blob memory storage does not yet spill over to disk, |
- // as a stop gap, we'll prevent memory usage over a max amount. |
- if (exceeded_memory) { |
- memory_usage_ -= target_blob_data->GetMemoryUsage(); |
- entry->flags |= EXCEEDED_MEMORY; |
- entry->data_builder.reset(new BlobDataBuilder(uuid)); |
+ if (!CanFitDataElement(data_element)) { |
michaeln
2015/02/05 22:58:47
it might make sense to hoist the one-line body of CanFitDataElement into the call site here.
dmurph
2015/02/06 01:32:30
Done.
|
+ BlobEntryExceededMemory(entry); |
return; |
} |
+ scoped_refptr<BlobDataItem> data_item = |
+ TransformDataElement(uuid, data_element); |
michaeln
2015/02/05 22:58:47
maybe rename this method to AllocateBlobItem(id, element)?
dmurph
2015/02/06 01:32:30
Done.
|
+ if (!AppendAllocatedBlobItem(uuid, target_blob_builder, data_item)) { |
+ BlobEntryExceededMemory(entry); |
+ } |
} |
-void BlobStorageContext::FinishBuildingBlob( |
- const std::string& uuid, const std::string& content_type) { |
+void BlobStorageContext::FinishBuildingBlob(const std::string& uuid, |
+ const std::string& content_type) { |
DCHECK(IsBeingBuilt(uuid)); |
BlobMap::iterator found = blob_map_.find(uuid); |
if (found == blob_map_.end()) |
return; |
BlobMapEntry* entry = found->second; |
entry->data_builder->set_content_type(content_type); |
- entry->data = entry->data_builder->BuildSnapshot().Pass(); |
- entry->data_builder.reset(); |
+ scoped_ptr<InternalBlobData::Builder> builder(entry->data_builder.release()); |
+ entry->data.reset(new InternalBlobData(builder.Pass())); |
UMA_HISTOGRAM_COUNTS("Storage.Blob.ItemCount", entry->data->items().size()); |
UMA_HISTOGRAM_BOOLEAN("Storage.Blob.ExceededMemory", |
(entry->flags & EXCEEDED_MEMORY) == EXCEEDED_MEMORY); |
@@ -247,28 +210,158 @@ void BlobStorageContext::DecrementBlobRefCount(const std::string& uuid) { |
return; |
auto* entry = found->second; |
if (--(entry->refcount) == 0) { |
+ size_t memory_freeing = 0; |
if (entry->IsBeingBuilt()) { |
- memory_usage_ -= entry->data_builder->GetMemoryUsage(); |
+ memory_freeing = entry->data_builder->GetNonsharedMemoryUsage(); |
+ entry->data_builder->RemoveBlobFromShareableItems(uuid); |
} else { |
- memory_usage_ -= entry->data->GetMemoryUsage(); |
+ memory_freeing = entry->data->GetNonsharedMemoryUsage(); |
+ entry->data->RemoveBlobFromShareableItems(uuid); |
} |
+ DCHECK_LE(memory_freeing, memory_usage_); |
+ memory_usage_ -= memory_freeing; |
delete entry; |
blob_map_.erase(found); |
} |
} |
-bool BlobStorageContext::ExpandStorageItems( |
- BlobDataBuilder* target_blob_data, |
- const BlobDataSnapshot& src_blob_data, |
- uint64 offset, |
- uint64 length) { |
- DCHECK(target_blob_data && length != static_cast<uint64>(-1)); |
+bool BlobStorageContext::CanFitDataElement(const DataElement& item) { |
+ return item.type() == DataElement::TYPE_BYTES && |
+ memory_usage_ + item.length() <= kMaxMemoryUsage; |
+} |
- const std::vector<scoped_refptr<BlobDataItem>>& items = src_blob_data.items(); |
+void BlobStorageContext::BlobEntryExceededMemory(BlobMapEntry* entry) { |
+ // If we're using too much memory, drop this blob's data. |
+ // TODO(michaeln): Blob memory storage does not yet spill over to disk, |
+ // as a stop gap, we'll prevent memory usage over a max amount. |
+ memory_usage_ -= entry->data_builder->GetNonsharedMemoryUsage(); |
+ entry->flags |= EXCEEDED_MEMORY; |
+ entry->data_builder.reset(new InternalBlobData::Builder()); |
+} |
+ |
+scoped_refptr<BlobDataItem> BlobStorageContext::TransformDataElement( |
+ const std::string& uuid, |
+ const DataElement& item) { |
+ scoped_refptr<BlobDataItem> blob_item; |
+ |
+ size_t length = item.length(); |
+ DCHECK_GT(length, 0u); |
+ scoped_ptr<DataElement> element(new DataElement()); |
+ switch (item.type()) { |
+ case DataElement::TYPE_BYTES: |
+ DCHECK(!item.offset()); |
+ element->SetToBytes(item.bytes(), length); |
+ blob_item = new BlobDataItem(element.Pass()); |
+ break; |
+ case DataElement::TYPE_FILE: |
+ element->SetToFilePathRange(item.path(), item.offset(), length, |
+ item.expected_modification_time()); |
+ blob_item = new BlobDataItem(element.Pass(), |
+ ShareableFileReference::Get(item.path())); |
+ break; |
+ case DataElement::TYPE_FILE_FILESYSTEM: |
+ element->SetToFileSystemUrlRange(item.filesystem_url(), item.offset(), |
+ length, |
+ item.expected_modification_time()); |
+ blob_item = new BlobDataItem(element.Pass()); |
+ break; |
+ case DataElement::TYPE_BLOB: |
+ // This is a temporary item that will be deconstructed later. |
+ element->SetToBlobRange(item.blob_uuid(), item.offset(), item.length()); |
+ blob_item = new BlobDataItem(element.Pass()); |
+ break; |
+ default: |
+ NOTREACHED(); |
+ break; |
+ } |
+ |
+ return blob_item; |
+} |
+ |
+bool BlobStorageContext::AppendAllocatedBlobItem( |
+ const std::string& target_blob_uuid, |
+ InternalBlobData::Builder* target_blob_builder, |
+ scoped_refptr<BlobDataItem> blob_item) { |
+ bool exceeded_memory = false; |
+ |
+ // The blob data is stored in the canonical way which only contains a |
+ // list of Data, File, and FileSystem items. Aggregated TYPE_BLOB items |
+ // are expanded into the primitive constituent types. |
+ // 1) The Data item is denoted by the raw data and length. |
+ // 2) The File item is denoted by the file path, the range and the expected |
+ // modification time. |
+ // 3) The FileSystem File item is denoted by the FileSystem URL, the range |
+ // and the expected modification time. |
+ // 4) The Blob items are expanded. |
+ |
+ const DataElement& data_element = blob_item->data_element(); |
+ size_t length = data_element.length(); |
+ size_t offset = data_element.offset(); |
michaeln
2015/02/05 22:58:47
these can be lengths/offsets into files, so int64 as the type rather than size_t.
dmurph
2015/02/06 01:32:30
Done
|
+ DCHECK_GT(length, 0u); |
+ UMA_HISTOGRAM_COUNTS("Storage.Blob.StorageSizeBeforeAppend", |
+ memory_usage_ / 1024); |
+ switch (data_element.type()) { |
+ case DataElement::TYPE_BYTES: |
+ UMA_HISTOGRAM_COUNTS("Storage.BlobItemSize.Bytes", length / 1024); |
+ DCHECK(!offset); |
+ if (memory_usage_ + length > kMaxMemoryUsage) { |
+ exceeded_memory = true; |
+ break; |
+ } |
+ memory_usage_ += length; |
+ target_blob_builder->AppendSharedBlobItem( |
+ new ShareableBlobDataItem(target_blob_uuid, blob_item)); |
+ break; |
+ case DataElement::TYPE_FILE: |
+ UMA_HISTOGRAM_COUNTS("Storage.BlobItemSize.File", |
+ (length - offset) / 1024); |
+ target_blob_builder->AppendSharedBlobItem( |
+ new ShareableBlobDataItem(target_blob_uuid, blob_item)); |
+ break; |
+ case DataElement::TYPE_FILE_FILESYSTEM: |
+ UMA_HISTOGRAM_COUNTS("Storage.BlobItemSize.FileSystem", |
+ (length - offset) / 1024); |
+ target_blob_builder->AppendSharedBlobItem( |
+ new ShareableBlobDataItem(target_blob_uuid, blob_item)); |
+ break; |
+ case DataElement::TYPE_BLOB: { |
+ UMA_HISTOGRAM_COUNTS("Storage.BlobItemSize.Blob", |
+ (length - offset) / 1024); |
+ // We grab the handle to ensure it stays around while we copy it. |
+ scoped_ptr<BlobDataHandle> src = |
+ GetBlobDataFromUUID(data_element.blob_uuid()); |
+ if (src) { |
+ BlobMapEntry* other_entry = |
+ blob_map_.find(data_element.blob_uuid())->second; |
+ DCHECK(other_entry->data); |
+ exceeded_memory = !AppendBlob(target_blob_uuid, target_blob_builder, |
+ *other_entry->data, offset, length); |
+ } |
+ break; |
+ } |
+ default: |
+ NOTREACHED(); |
+ break; |
+ } |
+ UMA_HISTOGRAM_COUNTS("Storage.Blob.StorageSizeAfterAppend", |
+ memory_usage_ / 1024); |
+ |
+ return !exceeded_memory; |
+} |
+ |
+bool BlobStorageContext::AppendBlob( |
+ const std::string& target_blob_uuid, |
+ InternalBlobData::Builder* target_blob_builder, |
+ const InternalBlobData& blob, |
+ size_t offset, |
+ size_t length) { |
+ DCHECK(length > 0); |
+ |
+ const std::vector<scoped_refptr<ShareableBlobDataItem>>& items = blob.items(); |
auto iter = items.begin(); |
if (offset) { |
for (; iter != items.end(); ++iter) { |
- const BlobDataItem& item = *(iter->get()); |
+ const BlobDataItem& item = *(iter->get()->item()); |
if (offset >= item.length()) |
offset -= item.length(); |
else |
@@ -277,24 +370,62 @@ bool BlobStorageContext::ExpandStorageItems( |
} |
for (; iter != items.end() && length > 0; ++iter) { |
- const BlobDataItem& item = *(iter->get()); |
- uint64 current_length = item.length() - offset; |
- uint64 new_length = current_length > length ? length : current_length; |
- if (iter->get()->type() == DataElement::TYPE_BYTES) { |
- if (!AppendBytesItem( |
- target_blob_data, |
- item.bytes() + static_cast<size_t>(item.offset() + offset), |
- static_cast<int64>(new_length))) { |
- return false; // exceeded memory |
- } |
- } else if (item.type() == DataElement::TYPE_FILE) { |
- AppendFileItem(target_blob_data, item.path(), item.offset() + offset, |
- new_length, item.expected_modification_time()); |
- } else { |
- DCHECK(item.type() == DataElement::TYPE_FILE_FILESYSTEM); |
- AppendFileSystemFileItem(target_blob_data, item.filesystem_url(), |
- item.offset() + offset, new_length, |
- item.expected_modification_time()); |
+ scoped_refptr<ShareableBlobDataItem> shareable_item = iter->get(); |
+ const BlobDataItem& item = *(shareable_item->item()); |
+ size_t item_length = item.length(); |
+ DCHECK_GT(item_length, offset); |
+ size_t current_length = item.length() - offset; |
+ size_t new_length = current_length > length ? length : current_length; |
+ |
+ bool reusing_blob_item = offset == 0 && new_length == item.length(); |
+ UMA_HISTOGRAM_BOOLEAN("Storage.Blob.ReusedItem", reusing_blob_item); |
+ if (reusing_blob_item) { |
michaeln
2015/02/05 20:02:09
woohooo!
dmurph
2015/02/06 01:32:30
:)
|
+ shareable_item->referencing_blobs().insert(target_blob_uuid); |
+ target_blob_builder->AppendSharedBlobItem(shareable_item); |
+ length -= new_length; |
+ continue; |
+ } |
+ |
+ // We need to do copying of the items when we have a different offset or |
+ // length |
+ switch (item.type()) { |
+ case DataElement::TYPE_BYTES: { |
+ UMA_HISTOGRAM_COUNTS("Storage.BlobItemSize.Blob.Bytes", |
+ new_length / 1024); |
+ if (memory_usage_ + new_length > kMaxMemoryUsage) { |
+ return false; |
+ } |
+ DCHECK(!item.offset()); |
+ scoped_ptr<DataElement> element(new DataElement()); |
+ element->SetToBytes(item.bytes() + offset, |
+ static_cast<int64>(new_length)); |
+ memory_usage_ += new_length; |
+ target_blob_builder->AppendSharedBlobItem(new ShareableBlobDataItem( |
+ target_blob_uuid, new BlobDataItem(element.Pass()))); |
+ } break; |
+ case DataElement::TYPE_FILE: { |
+ UMA_HISTOGRAM_COUNTS("Storage.BlobItemSize.Blob.File", |
+ new_length / 1024); |
+ scoped_ptr<DataElement> element(new DataElement()); |
+ element->SetToFilePathRange(item.path(), item.offset() + offset, |
+ new_length, |
+ item.expected_modification_time()); |
+ target_blob_builder->AppendSharedBlobItem(new ShareableBlobDataItem( |
+ target_blob_uuid, |
+ new BlobDataItem(element.Pass(), item.file_handle_))); |
+ } break; |
+ case DataElement::TYPE_FILE_FILESYSTEM: { |
+ UMA_HISTOGRAM_COUNTS("Storage.BlobItemSize.Blob.FileSystem", |
+ new_length / 1024); |
+ scoped_ptr<DataElement> element(new DataElement()); |
+ element->SetToFileSystemUrlRange(item.filesystem_url(), |
+ item.offset() + offset, new_length, |
+ item.expected_modification_time()); |
+ target_blob_builder->AppendSharedBlobItem(new ShareableBlobDataItem( |
+ target_blob_uuid, new BlobDataItem(element.Pass()))); |
+ } break; |
+ default: |
+ CHECK(false) << "Illegal blob item type: " << item.type(); |
} |
length -= new_length; |
offset = 0; |
@@ -302,45 +433,6 @@ bool BlobStorageContext::ExpandStorageItems( |
return true; |
} |
-bool BlobStorageContext::AppendBytesItem(BlobDataBuilder* target_blob_data, |
- const char* bytes, |
- int64 length) { |
- if (length < 0) { |
- DCHECK(false); |
- return false; |
- } |
- if (memory_usage_ + length > kMaxMemoryUsage) { |
- return false; |
- } |
- target_blob_data->AppendData(bytes, static_cast<size_t>(length)); |
- memory_usage_ += length; |
- return true; |
-} |
- |
-void BlobStorageContext::AppendFileItem( |
- BlobDataBuilder* target_blob_data, |
- const base::FilePath& file_path, |
- uint64 offset, |
- uint64 length, |
- const base::Time& expected_modification_time) { |
- // It may be a temporary file that should be deleted when no longer needed. |
- scoped_refptr<ShareableFileReference> shareable_file = |
- ShareableFileReference::Get(file_path); |
- |
- target_blob_data->AppendFile(file_path, offset, length, |
- expected_modification_time, shareable_file); |
-} |
- |
-void BlobStorageContext::AppendFileSystemFileItem( |
- BlobDataBuilder* target_blob_data, |
- const GURL& filesystem_url, |
- uint64 offset, |
- uint64 length, |
- const base::Time& expected_modification_time) { |
- target_blob_data->AppendFileSystemFile(filesystem_url, offset, length, |
- expected_modification_time); |
-} |
- |
bool BlobStorageContext::IsInUse(const std::string& uuid) { |
return blob_map_.find(uuid) != blob_map_.end(); |
} |
@@ -356,4 +448,18 @@ bool BlobStorageContext::IsUrlRegistered(const GURL& blob_url) { |
return public_blob_urls_.find(blob_url) != public_blob_urls_.end(); |
} |
+scoped_ptr<BlobDataSnapshot> BlobStorageContext::CreateSnapshot( |
+ const std::string& uuid, |
+ const InternalBlobData& data) { |
+ scoped_ptr<BlobDataSnapshot> snapshot(new BlobDataSnapshot( |
+ uuid, data.content_type(), data.content_disposition())); |
+ snapshot->items_.resize(data.items_.size()); |
+ std::transform(data.items_.begin(), data.items_.end(), |
michaeln
2015/02/05 20:02:09
might be easier to read as a for (const auto& shareable_item : ...) loop instead of std::transform.
dmurph
2015/02/06 01:32:30
Fine! :)
|
+ snapshot->items_.begin(), |
+ [](const scoped_refptr<ShareableBlobDataItem>& shareableItem) { |
michaeln
2015/02/05 20:02:09
shareableItem should be shareable_item (snake_case per style guide).
dmurph
2015/02/06 01:32:30
Done.
|
+ return shareableItem->item(); |
+ }); |
+ return snapshot; |
+} |
+ |
} // namespace storage |