Chromium Code Reviews

| Index: storage/browser/blob/blob_slice.cc |
| diff --git a/storage/browser/blob/blob_slice.cc b/storage/browser/blob/blob_slice.cc |
| new file mode 100644 |
| index 0000000000000000000000000000000000000000..96be5a587e7a87ee858ebae8b14c24458ddae3d8 |
| --- /dev/null |
| +++ b/storage/browser/blob/blob_slice.cc |
| @@ -0,0 +1,125 @@ |
| +// Copyright 2016 The Chromium Authors. All rights reserved. |
| +// Use of this source code is governed by a BSD-style license that can be |
| +// found in the LICENSE file. |
| + |
| +#include "storage/browser/blob/blob_slice.h" |
| + |
| +#include <algorithm> |
| +#include <limits> |
| +#include <memory> |
| + |
| +#include "storage/browser/blob/blob_data_builder.h" |
| +#include "storage/browser/blob/blob_data_item.h" |
| +#include "storage/browser/blob/internal_blob_data.h" |
| +#include "storage/browser/blob/shareable_blob_data_item.h" |
| + |
| +namespace storage { |
| + |
| +BlobSlice::BlobSlice(const InternalBlobData& source, |
| + uint64_t slice_offset, |
| + uint64_t slice_size) { |
| + const auto& source_items = source.items(); |
| + const auto& offsets = source.offsets(); |
| + LOG(ERROR) << "doing a slice at " << slice_offset << " with size " |
| + << slice_size; |
| + DCHECK_LE(slice_offset + slice_size, source.total_size()); |
| + size_t item_index = |
| + std::upper_bound(offsets.begin(), offsets.end(), slice_offset) - |
| + offsets.begin(); |
| + uint64_t item_offset = |
| + item_index == 0 ? slice_offset : slice_offset - offsets[item_index - 1]; |
| + size_t num_items = source_items.size(); |
| + |
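The `std::upper_bound` call above does the offset bookkeeping: assuming `offsets` holds the running end offset of each source item, the first entry greater than `slice_offset` identifies the item the slice starts in, and subtracting the previous entry gives the offset inside that item. A minimal standalone sketch of that lookup (plain C++ only; `FindSliceStart` and the cumulative-offsets vector are illustrative assumptions, not Chromium API):

```cpp
// Illustrative sketch only (not Chromium code): map a slice offset to the
// first source item it touches, assuming `offsets[i]` is the cumulative size
// of items 0..i (i.e. the end offset of item i).
#include <algorithm>
#include <cstddef>
#include <cstdint>
#include <utility>
#include <vector>

std::pair<size_t, uint64_t> FindSliceStart(const std::vector<uint64_t>& offsets,
                                           uint64_t slice_offset) {
  // The first cumulative end offset strictly greater than slice_offset marks
  // the item containing that byte.
  size_t item_index = static_cast<size_t>(
      std::upper_bound(offsets.begin(), offsets.end(), slice_offset) -
      offsets.begin());
  // Offset of the slice start inside that item.
  uint64_t item_offset =
      item_index == 0 ? slice_offset : slice_offset - offsets[item_index - 1];
  return {item_index, item_offset};
}

// Example: item sizes {10, 20, 30} -> offsets {10, 30, 60};
// FindSliceStart({10, 30, 60}, 25) returns {1, 15}: byte 25 is byte 15 of
// item 1.
```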
| + size_t first_item_index = item_index; |
| + copying_memory_size = 0; |
| + |
| + // Read starting from 'first_item_index' and 'item_offset'. |
| + for (uint64_t total_sliced = 0; |
| + item_index < num_items && total_sliced < slice_size; item_index++) { |
| + const scoped_refptr<BlobDataItem>& source_item = |
| + source_items[item_index]->item(); |
| + uint64_t source_length = source_item->length(); |
| + DCHECK_NE(source_length, std::numeric_limits<uint64_t>::max()); |
| + DCHECK_NE(source_length, 0ull); |
| + |
| + uint64_t read_size = |
| + std::min(source_length - item_offset, slice_size - total_sliced); |
| + total_sliced += read_size; |
| + |
| + if (read_size == source_length) { |
| + // We can share the entire item. |
| + LOG(ERROR) << "we can share"; |
| + dest_items.push_back(source_items[item_index]); |
| + continue; |
| + } |
| + |
| + scoped_refptr<BlobDataItem> data_item; |
| + switch (source_item->type()) { |
| + case DataElement::TYPE_BYTES_DESCRIPTION: |
| + case DataElement::TYPE_BYTES: { |
| + if (item_index == first_item_index) { |
| + first_item_slice_offset = item_offset; |
| + first_source_item = source_items[item_index]; |
| + } else { |
| + last_source_item = source_items[item_index]; |
| + } |
| + LOG(ERROR) << "we're sliceing a bytes item!"; |
kinuko, 2016/07/17 16:15:47:
(typo, but I assume we're removing these LOG(ERROR
dmurph, 2016/07/19 02:26:27:
They will all be removed, I have them to verify we
| + copying_memory_size += read_size; |
| + // Since we don't have quota yet for memory, we create temporary items |
| + // for this data. When our blob is finished constructing, all dependent |
| + // blobs are done, and we have enough memory quota, we'll copy the data |
| + // over. |
| + std::unique_ptr<DataElement> element(new DataElement()); |
| + element->SetToBytesDescription(base::checked_cast<size_t>(read_size)); |
| + data_item = new BlobDataItem(std::move(element)); |
| + } break; |
kinuko, 2016/07/17 16:15:47:
nit: is this style common? I think I see followin
dmurph, 2016/07/19 02:26:27:
Done.
| + case DataElement::TYPE_FILE: { |
| + std::unique_ptr<DataElement> element(new DataElement()); |
| + element->SetToFilePathRange( |
| + source_item->path(), source_item->offset() + item_offset, read_size, |
| + source_item->expected_modification_time()); |
| + data_item = |
| + new BlobDataItem(std::move(element), source_item->data_handle_); |
| + |
| + if (source_item->path().value() == |
| + std::string(BlobDataBuilder::kAppendFutureFileTemporaryFileName)) { |
| + // Since we don't have file path / reference for our future file, we |
| + // create another temporary file item. When our blob is finished |
| + // constructing, all dependent blobs are done, and we can copy the |
| + // handle over. |
| + LOG(ERROR) << "we're slicing a temp file item!"; |
| + if (item_index == first_item_index) { |
| + first_item_slice_offset = item_offset; |
| + first_source_item = source_items[item_index]; |
| + } else { |
| + last_source_item = source_items[item_index]; |
| + } |
| + } |
| + } break; |
| + case DataElement::TYPE_FILE_FILESYSTEM: { |
| + std::unique_ptr<DataElement> element(new DataElement()); |
| + element->SetToFileSystemUrlRange( |
| + source_item->filesystem_url(), source_item->offset() + item_offset, |
| + read_size, source_item->expected_modification_time()); |
| + data_item = new BlobDataItem(std::move(element)); |
| + } break; |
| + case DataElement::TYPE_DISK_CACHE_ENTRY: { |
| + std::unique_ptr<DataElement> element(new DataElement()); |
| + element->SetToDiskCacheEntryRange(source_item->offset() + item_offset, |
| + read_size); |
| + data_item = |
| + new BlobDataItem(std::move(element), source_item->data_handle_, |
| + source_item->disk_cache_entry(), |
| + source_item->disk_cache_stream_index(), |
| + source_item->disk_cache_side_stream_index()); |
| + } break; |
| + case DataElement::TYPE_BLOB: |
| + case DataElement::TYPE_UNKNOWN: |
| + CHECK(false) << "Illegal blob item type: " << source_item->type(); |
| + } |
| + dest_items.push_back(new ShareableBlobDataItem(std::move(data_item))); |
| + item_offset = 0; |
| + } |
| +} |
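As a reading aid for the constructor's loop, here is a minimal standalone sketch of the same walk over plain item lengths (illustrative names only, no Chromium types; it only records which items would be shared versus copied, rather than building BlobDataItems):

```cpp
// Illustrative sketch only (not Chromium code): the same walk as the
// constructor's loop, over plain item lengths. Items fully covered by the
// slice are "shared"; partially covered items would get a new, trimmed item.
#include <algorithm>
#include <cstddef>
#include <cstdint>
#include <vector>

struct SlicePiece {
  size_t item_index;  // index of the source item
  uint64_t offset;    // offset of the slice inside that item
  uint64_t length;    // bytes taken from that item
  bool shared;        // true if the whole item is reused unchanged
};

std::vector<SlicePiece> SliceItems(const std::vector<uint64_t>& lengths,
                                   uint64_t slice_offset,
                                   uint64_t slice_size) {
  std::vector<SlicePiece> pieces;
  // Find the first item touched by the slice and the offset inside it
  // (BlobSlice does this with std::upper_bound over cumulative offsets).
  size_t item_index = 0;
  uint64_t item_offset = slice_offset;
  while (item_index < lengths.size() && item_offset >= lengths[item_index]) {
    item_offset -= lengths[item_index];
    ++item_index;
  }
  // Walk items until the requested number of bytes has been covered.
  for (uint64_t total_sliced = 0;
       item_index < lengths.size() && total_sliced < slice_size;
       ++item_index) {
    uint64_t read_size =
        std::min(lengths[item_index] - item_offset, slice_size - total_sliced);
    total_sliced += read_size;
    // Reading the full item length means the item can be shared as-is;
    // otherwise only a sub-range is used, which BlobSlice turns into a new
    // item.
    pieces.push_back({item_index, item_offset, read_size,
                      /*shared=*/read_size == lengths[item_index]});
    // Only the first item can start mid-item; later items start at offset 0.
    item_offset = 0;
  }
  return pieces;
}

// Example: lengths {10, 20, 30}, slice offset 5, size 30 (bytes [5, 35)):
//   {0, 5, 5, copied}, {1, 0, 20, shared}, {2, 0, 5, copied}.
```

Sharing whole items keeps slicing cheap: only the partially covered first and last items need new range items, and only byte-backed ones need a later copy once memory quota is granted.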
| + |
| +BlobSlice::~BlobSlice() {} |
| + |
| +} // namespace storage |