Chromium Code Reviews

| Index: storage/browser/blob/blob_async_transport_strategy.cc |
| diff --git a/storage/browser/blob/blob_async_transport_strategy.cc b/storage/browser/blob/blob_async_transport_strategy.cc |
| new file mode 100644 |
| index 0000000000000000000000000000000000000000..bf1a535f0890f27ae9e4b83b22175bb1f2287dc4 |
| --- /dev/null |
| +++ b/storage/browser/blob/blob_async_transport_strategy.cc |
| @@ -0,0 +1,308 @@ |
| +// Copyright 2015 The Chromium Authors. All rights reserved. |
| +// Use of this source code is governed by a BSD-style license that can be |
| +// found in the LICENSE file. |
| + |
| +#include <algorithm> |
| +#include <limits> |
| + |
| +#include "storage/browser/blob/blob_async_transport_strategy.h" |
| +#include "storage/common/blob_storage/blob_storage_constants.h" |
| + |
| +namespace storage { |
| +namespace { |
| +bool IsBytes(DataElement::Type type) { |
| + return type == DataElement::TYPE_BYTES || |
| + type == DataElement::TYPE_BYTES_DESCRIPTION; |
| +} |
| +} // namespace |
| + |
| +// This class handles the logic of how transported memory is going to be |
| +// represented as storage in the browser. The main idea is that all the memory |
| +// is now packed into file chunks, and the browser items will just reference |
| +// the file with offsets and sizes. |
| +class FileStorageStrategy |
| + : public BlobAsyncTransportStrategy::BlobSegmentVisitor<uint64_t> { |
| + public: |
| + typedef uint64_t SizeType; |
| + |
| + FileStorageStrategy( |
| + std::vector<BlobAsyncTransportStrategy::RendererMemoryItemRequest>* |
| + requests, |
| + BlobDataBuilder* builder) |
| + : requests(requests), builder(builder), current_storage_item(0) {} |
| + |
| + ~FileStorageStrategy() override {} |
| + |
| + void VisitBytesSegment(size_t element_index, |
| + SizeType element_offset, |
| + size_t segment_index, |
| + SizeType segment_offset, |
| + SizeType size) override { |
| + BlobAsyncTransportStrategy::RendererMemoryItemRequest request; |
| + request.browser_item_index = current_storage_item; |
| + request.browser_item_offset = 0; |
| + request.message.request_number = requests->size(); |
| + request.message.transport_strategy = IPCBlobItemRequestStrategy::FILE; |
| + request.message.renderer_item_index = element_index; |
| + request.message.renderer_item_offset = element_offset; |
| + request.message.size = size; |
| + request.message.handle_index = segment_index; |
| + request.message.handle_offset = segment_offset; |
| + |
| + requests->push_back(request); |
| + builder->AppendFutureFile(segment_offset, size); |
|
michaeln
2015/11/21 00:59:45
AppendFuture and PopulateFuture, nice choice for names
|
| + current_storage_item++; |
| + } |
| + |
| + void VisitNonBytesSegment(const DataElement& element, |
| + size_t element_index) override { |
| + builder->AppendIPCDataElement(element); |
| + current_storage_item++; |
| + } |
| + |
| + void Done() override {} |
| + |
| + std::vector<BlobAsyncTransportStrategy::RendererMemoryItemRequest>* requests; |
| + BlobDataBuilder* builder; |
| + |
| + size_t current_storage_item; |
| +}; |
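
To make the packing concrete, the following standalone sketch (not part of this patch) mimics what ForEachWithSegment combined with FileStorageStrategy does: it carves bytes elements into file-sized chunks, and each chunk corresponds to one FILE request against one AppendFutureFile item. The element sizes (6 and 10 bytes) and the 8-byte file limit are hypothetical, chosen only to keep the example small.

// Standalone illustration; the real logic lives in ForEachWithSegment below
// and in FileStorageStrategy::VisitBytesSegment above.
#include <algorithm>
#include <cstdint>
#include <cstdio>
#include <vector>

int main() {
  const std::vector<uint64_t> element_sizes = {6, 10};  // hypothetical bytes elements
  const uint64_t kMaxFileSize = 8;                      // hypothetical limit

  size_t file_index = 0;     // becomes message.handle_index
  uint64_t file_offset = 0;  // becomes message.handle_offset
  for (size_t element_index = 0; element_index < element_sizes.size();
       ++element_index) {
    uint64_t element_offset = 0;
    uint64_t left = element_sizes[element_index];
    while (left > 0) {
      if (file_offset == kMaxFileSize) {  // the current file is full
        ++file_index;
        file_offset = 0;
      }
      uint64_t chunk = std::min(kMaxFileSize - file_offset, left);
      // One FILE request and one AppendFutureFile item per chunk.
      std::printf("file %zu @ %llu <- element %zu @ %llu, %llu bytes\n",
                  file_index, (unsigned long long)file_offset, element_index,
                  (unsigned long long)element_offset, (unsigned long long)chunk);
      left -= chunk;
      file_offset += chunk;
      element_offset += chunk;
    }
  }
  // Prints:
  //   file 0 @ 0 <- element 0 @ 0, 6 bytes
  //   file 0 @ 6 <- element 1 @ 0, 2 bytes
  //   file 1 @ 0 <- element 1 @ 2, 8 bytes
  return 0;
}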
| + |
| +// This class handles the logic of storing memory that is transported as |
| +// consolidated shared memory. The main hurdle is to re-separate memory blocks |
| +// that cross file or blob boundaries. |
|
michaeln
2015/11/21 00:59:45
when are blob boundaries crossed?
dmurph
2015/11/23 20:07:02
Blob reference boundaries.
|
| +class SharedMemoryStorageStrategy |
| + : public BlobAsyncTransportStrategy::BlobSegmentVisitor<size_t> { |
| + public: |
| + typedef size_t SizeType; |
| + |
| + SharedMemoryStorageStrategy( |
| + SizeType max_segment_size, |
| + std::vector<BlobAsyncTransportStrategy::RendererMemoryItemRequest>* |
| + requests, |
| + BlobDataBuilder* builder) |
| + : requests(requests), |
| + max_segment_size(max_segment_size), |
| + storage_element_offset(0), |
| + builder(builder), |
| + current_item_size(0), |
| + current_storage_item(0) {} |
| + ~SharedMemoryStorageStrategy() override {} |
| + |
| + void VisitBytesSegment(size_t element_index, |
| + SizeType element_offset, |
| + size_t segment_index, |
| + SizeType segment_offset, |
| + SizeType size) override { |
| + if (storage_element_offset + size > max_segment_size) { |
| + builder->AppendFutureData(current_item_size); |
| + current_storage_item++; |
| + current_item_size = 0; |
| + storage_element_offset = 0; |
| + } |
| + BlobAsyncTransportStrategy::RendererMemoryItemRequest request; |
| + request.browser_item_index = current_storage_item; |
| + request.browser_item_offset = storage_element_offset; |
| + request.message.request_number = requests->size(); |
| + request.message.transport_strategy = |
| + IPCBlobItemRequestStrategy::SHARED_MEMORY; |
| + request.message.renderer_item_index = element_index; |
| + request.message.renderer_item_offset = element_offset; |
| + request.message.size = size; |
| + request.message.handle_index = segment_index; |
| + request.message.handle_offset = segment_offset; |
| + |
| + requests->push_back(request); |
| + storage_element_offset += size; |
| + current_item_size += size; |
| + } |
| + |
| + void VisitNonBytesSegment(const DataElement& element, |
| + size_t element_index) override { |
| + builder->AppendFutureData(current_item_size); |
|
michaeln
2015/11/21 00:59:45
what if current_item_size is 0?
dmurph
2015/11/23 20:07:02
Fixed, and added test.
|
| + current_storage_item++; |
| + builder->AppendIPCDataElement(element); |
| + current_storage_item++; |
| + storage_element_offset = 0; |
| + current_item_size = 0; |
| + } |
| + |
| + void Done() override { |
| + if (current_item_size != 0) { |
| + builder->AppendFutureData(current_item_size); |
| + } |
| + } |
| + |
| + std::vector<BlobAsyncTransportStrategy::RendererMemoryItemRequest>* requests; |
| + |
| + SizeType max_segment_size; |
| + SizeType storage_element_offset; |
| + BlobDataBuilder* builder; |
| + SizeType current_item_size; |
| + size_t current_storage_item; |
|
michaeln
2015/11/21 00:59:45
naming this as current_item_index might help
|
| +}; |
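
In contrast to the file case, the shared memory strategy above accumulates consecutive chunks into a single browser data item and only flushes the accumulated size (AppendFutureData) when the next chunk would cross a shared memory segment boundary. A minimal standalone sketch of just that grouping step follows; the chunk sizes and the 8-byte segment limit are hypothetical.

// Standalone illustration of the grouping done by SharedMemoryStorageStrategy.
// The chunks are assumed to have already been cut to fit a segment by
// ForEachWithSegment; e.g. elements of sizes {5, 6, 4} with an 8-byte segment
// limit arrive as chunks {5, 3, 3, 4}.
#include <cstdio>
#include <vector>

int main() {
  const size_t kMaxSegmentSize = 8;                      // hypothetical limit
  const std::vector<size_t> chunk_sizes = {5, 3, 3, 4};  // hypothetical chunks

  std::vector<size_t> item_sizes;  // sizes handed to AppendFutureData
  size_t segment_offset = 0;       // storage_element_offset in the real class
  size_t current_item_size = 0;
  for (size_t chunk : chunk_sizes) {
    if (segment_offset + chunk > kMaxSegmentSize) {  // would cross the boundary
      item_sizes.push_back(current_item_size);       // flush the current item
      current_item_size = 0;
      segment_offset = 0;
    }
    segment_offset += chunk;
    current_item_size += chunk;
  }
  if (current_item_size != 0)
    item_sizes.push_back(current_item_size);  // mirrors Done()

  for (size_t size : item_sizes)
    std::printf("browser item of %zu bytes\n", size);  // prints 8, then 7
  return 0;
}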
| + |
| +BlobAsyncTransportStrategy::RendererMemoryItemRequest:: |
| + RendererMemoryItemRequest() |
| + : browser_item_index(0), browser_item_offset(0), received(false) {} |
| + |
| +BlobAsyncTransportStrategy::BlobAsyncTransportStrategy() |
| + : error_(BlobAsyncTransportStrategy::ERROR_NONE), total_bytes_size_(0) {} |
| + |
| +BlobAsyncTransportStrategy::~BlobAsyncTransportStrategy() {} |
| + |
| +// if total_blob_size > kMaxBlobSize (say 400MB) |
| +// Request all data in files |
| +// (Segment all of the existing data into |
| +// file blocks, of <= kMaxFileSize) |
| +// else if total_blob_size > kMaxIPCSize (this is 150KB) |
| +// Request all data in shared memory |
| +// (Segment all of the existing data into |
| +// shared memory blocks, of <= kMaxSharedMemorySize) |
| +// else |
| +// Request all data to be sent over IPC |
| +void BlobAsyncTransportStrategy::Initialize( |
|
michaeln
2015/11/21 00:59:45
this method reads a lot nicer now, thnx!
|
| + size_t max_ipc_memory_size, |
| + size_t max_shared_memory_size, |
| + uint64_t max_file_size, |
| + uint64_t disk_space_left, |
| + size_t memory_available, |
| + const std::string& uuid, |
| + const std::vector<DataElement>& blob_item_infos) { |
| + file_handle_sizes_.clear(); |
| + shared_memory_handle_sizes_.clear(); |
| + requests_.clear(); |
| + builder_.reset(new BlobDataBuilder(uuid)); |
| + error_ = BlobAsyncTransportStrategy::ERROR_NONE; |
|
kinuko
2015/11/20 15:19:42
I think I've once asked this but is an instance of this class ever reused, i.e. can Initialize be called more than once?
dmurph
2015/11/20 22:10:05
Sounds good, I'll add DCHECKs then.
|
| + |
| + size_t memory_items = 0; |
| + total_bytes_size_ = 0; |
| + for (const auto& info : blob_item_infos) { |
| + if (!IsBytes(info.type())) { |
| + continue; |
| + } |
| + total_bytes_size_ += info.length(); |
| + ++memory_items; |
| + } |
| + |
| + // See if we have enough total space (memory + disk). |
| + if (total_bytes_size_ > |
| + disk_space_left + static_cast<uint64_t>(memory_available)) { |
| + error_ = BlobAsyncTransportStrategy::ERROR_TOO_LARGE; |
| + return; |
| + } |
| + |
| + // If the data is larger than the available memory, we go straight to disk. |
| + if (total_bytes_size_ > memory_available) { |
| + // First, handle the case where we go to disk. |
|
michaeln
2015/11/21 00:59:45
redundant comment?
dmurph
2015/11/23 20:07:02
Done.
|
| + if (total_bytes_size_ < disk_space_left) { |
| + ComputeHandleSizes(total_bytes_size_, max_file_size, &file_handle_sizes_); |
| + FileStorageStrategy strategy(&requests_, builder_.get()); |
| + ForEachWithSegment(blob_item_infos, max_file_size, &strategy); |
| + return; |
| + } |
| + error_ = BlobAsyncTransportStrategy::ERROR_TOO_LARGE; |
| + return; |
|
kinuko
2015/11/20 15:19:42
nit: prefer early return (handle error case first)
dmurph
2015/11/20 22:10:05
Done.
|
| + } |
| + |
| + if (total_bytes_size_ > max_ipc_memory_size) { |
| + CHECK_LE(total_bytes_size_, std::numeric_limits<size_t>::max()); |
| + ComputeHandleSizes(static_cast<size_t>(total_bytes_size_), |
| + max_shared_memory_size, &shared_memory_handle_sizes_); |
| + SharedMemoryStorageStrategy strategy(max_shared_memory_size, &requests_, |
| + builder_.get()); |
| + ForEachWithSegment(blob_item_infos, max_shared_memory_size, &strategy); |
| + return; |
| + } |
|
kinuko
2015/11/20 15:19:42
nit: let's add one empty line here
dmurph
2015/11/20 22:10:05
Done.
|
| + // Since everything fits in IPC memory, we don't need to segment anything; |
| + // we just request the data directly over IPC. |
| + size_t items_length = blob_item_infos.size(); |
| + for (size_t i = 0; i < items_length; i++) { |
| + const auto& info = blob_item_infos.at(i); |
| + if (!IsBytes(info.type())) { |
| + builder_->AppendIPCDataElement(info); |
| + continue; |
| + } |
| + BlobAsyncTransportStrategy::RendererMemoryItemRequest request; |
| + request.browser_item_index = i; |
| + request.browser_item_offset = 0; |
| + request.message.request_number = requests_.size(); |
| + request.message.transport_strategy = IPCBlobItemRequestStrategy::IPC; |
| + request.message.renderer_item_index = i; |
| + request.message.renderer_item_offset = 0; |
| + request.message.size = info.length(); |
| + requests_.push_back(request); |
| + builder_->AppendFutureData(info.length()); |
| + } |
| +} |
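
Purely as an illustration of the decision tree in the comment above Initialize, here is a standalone sketch with hypothetical names (TransportChoice, PickTransport) and hypothetical numbers in main; it is not part of the class's API.

// Standalone illustration of the size-based choice Initialize makes.
#include <cstddef>
#include <cstdint>

enum class TransportChoice { IPC, SHARED_MEMORY, FILE, TOO_LARGE };

TransportChoice PickTransport(uint64_t total_bytes_size,
                              size_t max_ipc_memory_size,
                              size_t memory_available,
                              uint64_t disk_space_left) {
  if (total_bytes_size > disk_space_left + uint64_t{memory_available})
    return TransportChoice::TOO_LARGE;  // not enough memory + disk combined
  if (total_bytes_size > memory_available) {
    // Doesn't fit in memory: go to files if the disk can hold it.
    return total_bytes_size < disk_space_left ? TransportChoice::FILE
                                              : TransportChoice::TOO_LARGE;
  }
  if (total_bytes_size > max_ipc_memory_size)
    return TransportChoice::SHARED_MEMORY;  // too big for one IPC, fits in memory
  return TransportChoice::IPC;              // small enough to send inline over IPC
}

int main() {
  // 200KB of bytes with a 150KB IPC cap, plenty of memory and disk
  // -> shared memory under these hypothetical numbers.
  TransportChoice choice = PickTransport(200 * 1024, 150 * 1024,
                                         500 * 1024 * 1024, 10ull << 30);
  return choice == TransportChoice::SHARED_MEMORY ? 0 : 1;
}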
| + |
| +// Splits each |element| into one or more |segments| of at most |
| +// |max_segment_size| bytes, and invokes the visitor to determine the request |
| +// to make for each |segment| produced. A |segment| can also span multiple |
| +// |elements|. |
| +/* static */ |
| +template <typename SizeType> |
| +void BlobAsyncTransportStrategy::ForEachWithSegment( |
| + const std::vector<DataElement>& elements, |
| + SizeType max_segment_size, |
| + BlobSegmentVisitor<SizeType>* visitor) { |
| + DCHECK_GT(max_segment_size, 0ull); |
| + size_t segment_index = 0; |
| + SizeType segment_offset = 0; |
| + size_t elements_length = elements.size(); |
| + for (size_t element_index = 0; element_index < elements_length; |
| + ++element_index) { |
| + const auto& element = elements.at(element_index); |
| + DataElement::Type type = element.type(); |
| + if (!IsBytes(type)) { |
| + visitor->VisitNonBytesSegment(element, element_index); |
| + continue; |
| + } |
| + SizeType element_memory_left = element.length(); |
| + SizeType element_offset = 0; |
| + while (element_memory_left > 0) { |
| + if (segment_offset == max_segment_size) { |
| + ++segment_index; |
| + segment_offset = 0; |
| + } |
| + SizeType memory_writing = |
| + std::min(max_segment_size - segment_offset, element_memory_left); |
| + visitor->VisitBytesSegment(element_index, element_offset, segment_index, |
| + segment_offset, memory_writing); |
| + element_memory_left -= memory_writing; |
| + segment_offset += memory_writing; |
| + element_offset += memory_writing; |
| + } |
| + } |
| + visitor->Done(); |
| +} |
| + |
| +/* static */ |
| +bool BlobAsyncTransportStrategy::ShouldBeShortcut( |
| + const std::vector<DataElement>& elements, |
| + size_t memory_available) { |
| + size_t shortcut_bytes = 0; |
| + for (const auto& element : elements) { |
| + DataElement::Type type = element.type(); |
| + if (type == DataElement::TYPE_BYTES_DESCRIPTION) { |
| + return false; |
| + } |
| + if (type == DataElement::TYPE_BYTES) { |
| + shortcut_bytes += element.length(); |
| + } |
| + } |
| + return shortcut_bytes <= memory_available; |
| +} |
| + |
| +/* static */ |
| +template <typename SizeType> |
| +void BlobAsyncTransportStrategy::ComputeHandleSizes( |
| + SizeType total_memory_size, |
| + SizeType max_segment_size, |
| + std::vector<SizeType>* segment_sizes) { |
|
kinuko
2015/11/20 15:19:42
nit: probably resize the segment_sizes first before filling it in
dmurph
2015/11/20 22:10:05
Done.
|
| + segment_sizes->insert(segment_sizes->begin(), |
| + total_memory_size / max_segment_size, max_segment_size); |
| + if (total_memory_size % max_segment_size > 0) { |
| + segment_sizes->push_back(total_memory_size % max_segment_size); |
| + } |
| +} |
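
For a quick worked example of ComputeHandleSizes: integer division gives the number of full-size handles, and any remainder becomes one final, smaller handle. The numbers below are hypothetical.

// Standalone illustration: 10 bytes split across 4-byte handles -> {4, 4, 2}.
#include <cstdio>
#include <vector>

int main() {
  const size_t total_memory_size = 10;  // hypothetical
  const size_t max_segment_size = 4;    // hypothetical

  std::vector<size_t> segment_sizes(total_memory_size / max_segment_size,
                                    max_segment_size);
  if (total_memory_size % max_segment_size > 0)
    segment_sizes.push_back(total_memory_size % max_segment_size);

  for (size_t size : segment_sizes)
    std::printf("%zu\n", size);  // prints 4, 4, 2
  return 0;
}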
| + |
| +} // namespace storage |