Chromium Code Reviews| Index: content/child/blob_storage/blob_consolidation.cc |
| diff --git a/content/child/blob_storage/blob_consolidation.cc b/content/child/blob_storage/blob_consolidation.cc |
| new file mode 100644 |
| index 0000000000000000000000000000000000000000..cd98614f41dcdc169486a41bd84945e0962deb1a |
| --- /dev/null |
| +++ b/content/child/blob_storage/blob_consolidation.cc |
| @@ -0,0 +1,169 @@ |
| +// Copyright 2015 The Chromium Authors. All rights reserved. |
| +// Use of this source code is governed by a BSD-style license that can be |
| +// found in the LICENSE file. |
| + |
#include "content/child/blob_storage/blob_consolidation.h"

#include <stdint.h>

#include <algorithm>
#include <cstring>
#include <limits>
#include <string>

using storage::DataElement;
using blink::WebBlobData;
using blink::WebString;
using blink::WebThreadSafeData;
using blink::WebURL;
| +namespace content { |
| + |
| +using ReadStatus = BlobConsolidation::ReadStatus; |
| + |
| +BlobConsolidation::ConsolidatedItem::ConsolidatedItem() |
| + : type(DataElement::TYPE_UNKNOWN), |
| + offset(0), |
| + length(kuint64max), |
| + expected_modification_time(0) {} |
| +BlobConsolidation::ConsolidatedItem::~ConsolidatedItem() {} |
| + |
| +BlobConsolidation::ConsolidatedItem::ConsolidatedItem(DataElement::Type type, |
| + uint64_t offset, |
| + uint64_t length) |
| + : type(type), |
| + offset(offset), |
| + length(length), |
| + expected_modification_time(0) {} |
| + |
| +BlobConsolidation::BlobConsolidation() |
| + : total_memory_(0), total_memory_read_(0) {} |
| + |
| +BlobConsolidation::~BlobConsolidation() {} |
| + |
| +void BlobConsolidation::AddDataItem(const WebThreadSafeData& data) { |
| + ConsolidatedItem* item; |
| + if (consolidated_items_.empty() || |
| + consolidated_items_.back().type != DataElement::TYPE_BYTES) { |
| + consolidated_items_.push_back( |
| + ConsolidatedItem(DataElement::TYPE_BYTES, 0, 0)); |
| + } |
| + item = &consolidated_items_.back(); |
| + if (!item->data.empty()) { |
| + item->offsets.push_back(static_cast<size_t>(item->length)); |
| + } |
| + item->length += data.size(); |
| + total_memory_ += data.size(); |
| + item->data.push_back(WebThreadSafeData()); |
| + item->data.back().assign(data); |
| +} |
| + |
| +void BlobConsolidation::AddFileItem(const WebString& path, uint64_t offset, |
| + uint64_t length, |
| + double expected_modification_time) { |
| + consolidated_items_.push_back( |
| + ConsolidatedItem(DataElement::TYPE_FILE, offset, length)); |
| + ConsolidatedItem& item = consolidated_items_.back(); |
| + item.path.assign(path); |
| + item.expected_modification_time = expected_modification_time; |
| +} |
| + |
| +void BlobConsolidation::AddBlobItem(const WebString& uuid, uint64_t offset, |
| + uint64_t length) { |
| + consolidated_items_.push_back( |
| + ConsolidatedItem(DataElement::TYPE_BLOB, offset, length)); |
| + ConsolidatedItem& item = consolidated_items_.back(); |
| + item.blob_uuid.assign(uuid); |
| +} |
| + |
| +void BlobConsolidation::AddFileSystemItem(const WebURL& url, uint64_t offset, |
| + uint64_t length, |
| + double expected_modification_time) { |
| + consolidated_items_.push_back( |
| + ConsolidatedItem(DataElement::TYPE_FILE_FILESYSTEM, offset, length)); |
| + ConsolidatedItem& item = consolidated_items_.back(); |
| + item.filesystem_url = url; |
| + item.expected_modification_time = expected_modification_time; |
| +} |
| + |
| +ReadStatus BlobConsolidation::RecordMemoryRead(size_t consolidated_item_index, |
|
michaeln
2015/06/17 03:17:55
please put the method bodies in the .cc in the sam
dmurph
2015/06/17 18:29:50
Done.
|
| + size_t memory_size) { |
| + if (consolidated_item_index >= consolidated_items_.size()) { |
| + return ReadStatus::ERROR_OUT_OF_BOUNDS; |
| + } |
| + if (total_memory_read_ + memory_size > total_memory_) { |
| + LOG(ERROR) << "Memory for blob marked as read more than once."; |
| + return ReadStatus::ERROR; |
| + } |
| + const ConsolidatedItem& item = consolidated_items_[consolidated_item_index]; |
| + if (item.type != DataElement::TYPE_BYTES) { |
| + LOG(ERROR) << "Cannot read bytes from item with type " << item.type; |
| + return ReadStatus::ERROR_WRONG_TYPE; |
| + } |
| + |
| + // Keep track of memory read. |
| + total_memory_read_ += memory_size; |
| + return total_memory_read_ < total_memory_ ? ReadStatus::BLOB_BYTES_PENDING |
| + : ReadStatus::DONE; |
| +} |
| + |
| +ReadStatus BlobConsolidation::ReadMemory(size_t consolidated_item_index, |
| + size_t consolidated_offset, |
| + size_t consolidated_size, |
| + char* memory_out) { |
| + if (consolidated_item_index >= consolidated_items_.size()) { |
| + return ReadStatus::ERROR_OUT_OF_BOUNDS; |
| + } |
| + const ConsolidatedItem& item = consolidated_items_[consolidated_item_index]; |
| + if (item.type != DataElement::TYPE_BYTES) { |
| + return ReadStatus::ERROR_WRONG_TYPE; |
| + } |
| + if (consolidated_size + consolidated_offset > item.length) { |
| + LOG(ERROR) << "Invalid consolidated size " << consolidated_size |
| + << " and offset " << consolidated_offset << " vs item length of " |
| + << item.length; |
| + return ReadStatus::ERROR_OUT_OF_BOUNDS; |
| + } |
| + |
| + // We do a binary search to find the correct data to start with in the data |
|
michaeln
2015/06/17 03:17:55
fun ;)
|
| + // elements. This is slightly customized due to our unique storage and |
| + // constraints |
| + size_t mid = 0; |
| + size_t offset_from_mid = consolidated_offset; |
| + size_t num_items = item.data.size(); |
| + if (!item.offsets.empty()) { |
| + size_t low = 0; |
| + size_t high = num_items - 1; |
| + while (true) { |
| + mid = (high + low) / 2; |
| + // Note: we don't include the implicit '0' for the first item in offsets, |
| + // so we do (index - 1). |
|
michaeln
2015/06/17 03:17:55
i'd say you don't need what's after the , in the c
dmurph
2015/06/17 18:29:50
Done.
|
| + size_t item_offset = (mid == 0 ? 0 : item.offsets[mid - 1]); |
| + offset_from_mid = consolidated_offset - item_offset; |
| + size_t next_item_offset = (mid + 1 == num_items ? 0 : item.offsets[mid]); |
| + if (item_offset == consolidated_offset) { |
| + // found exact match. |
| + break; |
| + } else if (item_offset > consolidated_offset) { |
| + high = mid - 1; |
| + } else if (mid + 1 == num_items || |
| + next_item_offset > consolidated_offset) { |
| + // We are at the last item or the next offset it greater than the one |
| + // we want, so the current item wins. |
| + break; |
| + } else { |
| + low = mid + 1; |
| + } |
| + } |
| + } |
| + |
| + DCHECK_LT(offset_from_mid, item.data[mid].size()); |
| + // read starting from 'mid' and 'offset_from_mid' |
|
michaeln
2015/06/17 03:17:55
nit: cap "R" and a period at the end
dmurph
2015/06/17 18:29:50
Done.
|
| + for (size_t memory_read = 0; |
| + mid < num_items && memory_read < consolidated_size; mid++) { |
| + size_t read_size = std::min(item.data[mid].size() - offset_from_mid, |
| + consolidated_size - memory_read); |
| + memmove(memory_out + memory_read, item.data[mid].data() + offset_from_mid, |
| + read_size); |
| + offset_from_mid = 0; |
| + memory_read += read_size; |
| + } |
| + return ReadStatus::DONE; |
| +} |
| + |
| +} // namespace content |