Chromium Code Reviews
// Copyright 2015 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#include <algorithm>

#include "base/numerics/safe_math.h"
#include "storage/browser/blob/blob_async_transport_strategy.h"
#include "storage/common/blob_storage/blob_storage_constants.h"

namespace storage {
namespace {
bool IsBytes(DataElement::Type type) {
  return type == DataElement::TYPE_BYTES ||
         type == DataElement::TYPE_BYTES_DESCRIPTION;
}

// This is the general template that each strategy below implements. See the
// ForEachWithSegment method for a description of how these are called.
// class BlobSegmentVisitor {
//  public:
//   typedef ___ SizeType;
//   void VisitBytesSegment(size_t element_index, SizeType element_offset,
//                          size_t segment_index, SizeType segment_offset,
//                          SizeType size);
//   void VisitNonBytesSegment(const DataElement& element, size_t element_idx);
//   void Done();
// };

// This class handles the logic of how transported memory is going to be
// represented as storage in the browser. The main idea is that all the memory
// is now packed into file chunks, and the browser items will just reference
// the file with offsets and sizes.
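// A worked illustration (the sizes here are made up, not taken from this
// change): with a 10-byte max file size, renderer elements
// [bytes(6), file, bytes(7)] are packed into two file segments of sizes 10
// and 3, and the browser-side items become
//   [future file(segment 0, offset 0, size 6),
//    file,
//    future file(segment 0, offset 6, size 4),
//    future file(segment 1, offset 0, size 3)],
// so each memory request records which segment it fills and at what offset.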
class FileStorageStrategy {
 public:
  typedef uint64_t SizeType;

  FileStorageStrategy(
      std::vector<BlobAsyncTransportStrategy::RendererMemoryItemRequest>*
          requests,
      BlobDataBuilder* builder)
      : requests(requests), builder(builder), current_item_index(0) {}

  ~FileStorageStrategy() {}

  void VisitBytesSegment(size_t element_index,
                         SizeType element_offset,
                         size_t segment_index,
                         SizeType segment_offset,
                         SizeType size) {
    BlobAsyncTransportStrategy::RendererMemoryItemRequest request;
    request.browser_item_index = current_item_index;
    request.browser_item_offset = 0;
    request.message.request_number = requests->size();
    request.message.transport_strategy = IPCBlobItemRequestStrategy::FILE;
    request.message.renderer_item_index = element_index;
    request.message.renderer_item_offset = element_offset;
    request.message.size = size;
    request.message.handle_index = segment_index;
    request.message.handle_offset = segment_offset;

    requests->push_back(request);
    builder->AppendFutureFile(segment_offset, size);
    current_item_index++;
  }

  void VisitNonBytesSegment(const DataElement& element, size_t element_index) {
    builder->AppendIPCDataElement(element);
    current_item_index++;
  }

  void Done() {}

  std::vector<BlobAsyncTransportStrategy::RendererMemoryItemRequest>* requests;
  BlobDataBuilder* builder;

  size_t current_item_index;
};

// This class handles the logic of storing memory that is transported as
// consolidated shared memory.
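// A worked illustration (sizes made up for this example): with a 10-byte max
// shared memory segment, renderer elements [bytes(6), file, bytes(7)] are
// transported in two shared memory segments of sizes 10 and 3, while the
// browser-side items stay [future data(6), file, future data(7)]; the 7-byte
// item is filled by two requests, one copying 4 bytes from the end of
// segment 0 and one copying 3 bytes from the start of segment 1.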
class SharedMemoryStorageStrategy {
 public:
  typedef size_t SizeType;

  SharedMemoryStorageStrategy(
      SizeType max_segment_size,
      std::vector<BlobAsyncTransportStrategy::RendererMemoryItemRequest>*
          requests,
      BlobDataBuilder* builder)
      : requests(requests),
        max_segment_size(max_segment_size),
        builder(builder),
        current_item_size(0),
        current_item_index(0) {}
  ~SharedMemoryStorageStrategy() {}

  void VisitBytesSegment(size_t element_index,
                         SizeType element_offset,
                         size_t segment_index,
                         SizeType segment_offset,
                         SizeType size) {
    if (current_item_size + size > max_segment_size) {
      builder->AppendFutureData(current_item_size);
      current_item_index++;
      current_item_size = 0;
    }
    BlobAsyncTransportStrategy::RendererMemoryItemRequest request;
    request.browser_item_index = current_item_index;
    request.browser_item_offset = current_item_size;
    request.message.request_number = requests->size();
    request.message.transport_strategy =
        IPCBlobItemRequestStrategy::SHARED_MEMORY;
    request.message.renderer_item_index = element_index;
    request.message.renderer_item_offset = element_offset;
    request.message.size = size;
    request.message.handle_index = segment_index;
    request.message.handle_offset = segment_offset;

    requests->push_back(request);
    current_item_size += size;
  }

  void VisitNonBytesSegment(const DataElement& element, size_t element_index) {
    if (current_item_size != 0) {
      builder->AppendFutureData(current_item_size);
      current_item_index++;
    }
    builder->AppendIPCDataElement(element);
    current_item_index++;
    current_item_size = 0;
  }

  void Done() {
    if (current_item_size != 0) {
      builder->AppendFutureData(current_item_size);
    }
  }

  std::vector<BlobAsyncTransportStrategy::RendererMemoryItemRequest>* requests;

  SizeType max_segment_size;
  BlobDataBuilder* builder;
  SizeType current_item_size;
  size_t current_item_index;
};

// This iterates over the data elements and segments the 'bytes' data into
// the smallest number of segments given the max_segment_size.
// Each visitor callback describes either:
// * a non-memory item, or
// * a partition of a bytes element which will be populated into a given
//   segment at a given segment offset.
// More specifically, we split each |element| into one or more |segments| of at
// most max_segment_size, and invoke the visitor to determine the request to
// make for each |segment| produced. A |segment| can also span multiple
// |elements|.
// Assumption: all memory items are consolidated, i.e. no two 'bytes' items are
// adjacent to each other.
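// For example (illustrative sizes): with max_segment_size = 10 and elements
// [bytes(6), file, bytes(7)], the visitor sees
//   VisitBytesSegment(element 0, offset 0, segment 0, offset 0, size 6),
//   VisitNonBytesSegment(<the file element>, 1),
//   VisitBytesSegment(element 2, offset 0, segment 0, offset 6, size 4),
//   VisitBytesSegment(element 2, offset 4, segment 1, offset 0, size 3),
// and finally Done().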
template <typename Visitor, typename SizeType = typename Visitor::SizeType>
void ForEachWithSegment(const std::vector<DataElement>& elements,
                        SizeType max_segment_size,
                        Visitor* visitor) {
  DCHECK_GT(max_segment_size, 0ull);
  size_t segment_index = 0;
  SizeType segment_offset = 0;
  size_t elements_length = elements.size();
  for (size_t element_index = 0; element_index < elements_length;
       ++element_index) {
    const auto& element = elements.at(element_index);
    DataElement::Type type = element.type();
    if (!IsBytes(type)) {
      visitor->VisitNonBytesSegment(element, element_index);
      continue;
    }
    SizeType element_memory_left = element.length();
    SizeType element_offset = 0;
    while (element_memory_left > 0) {
      if (segment_offset == max_segment_size) {
        ++segment_index;
        segment_offset = 0;
      }
      SizeType memory_writing =
          std::min(max_segment_size - segment_offset, element_memory_left);
      visitor->VisitBytesSegment(element_index, element_offset, segment_index,
                                 segment_offset, memory_writing);
      element_memory_left -= memory_writing;
      segment_offset += memory_writing;
      element_offset += memory_writing;
    }
  }
  visitor->Done();
}
}  // namespace

BlobAsyncTransportStrategy::RendererMemoryItemRequest::
    RendererMemoryItemRequest()
    : browser_item_index(0), browser_item_offset(0), received(false) {}

BlobAsyncTransportStrategy::BlobAsyncTransportStrategy()
    : error_(BlobAsyncTransportStrategy::ERROR_NONE), total_bytes_size_(0) {}

BlobAsyncTransportStrategy::~BlobAsyncTransportStrategy() {}

// if total_blob_size > |memory_available| (say 400MB)
//   Request all data in files
//   (Segment all of the existing data into
//    file blocks, of <= |max_file_size|)
// else if total_blob_size > |max_ipc_memory_size| (say 150KB)
//   Request all data in shared memory
//   (Segment all of the existing data into
//    shared memory blocks, of <= |max_shared_memory_size|)
// else
//   Request all data to be sent over IPC
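// For example, with the hypothetical limits above (400MB of memory available,
// a 150KB IPC cap): a 1GB blob is requested as files, a 1MB blob is requested
// as shared memory, and a 100KB blob is requested directly over IPC.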
void BlobAsyncTransportStrategy::Initialize(
    size_t max_ipc_memory_size,
    size_t max_shared_memory_size,
    size_t max_file_size,
    uint64_t disk_space_left,
    size_t memory_available,
    const std::string& uuid,
    const std::vector<DataElement>& blob_item_infos) {
  DCHECK(handle_sizes_.empty());
  DCHECK(requests_.empty());
  DCHECK(!builder_.get());
  builder_.reset(new BlobDataBuilder(uuid));
  error_ = BlobAsyncTransportStrategy::ERROR_NONE;

  size_t memory_items = 0;
  base::CheckedNumeric<uint64_t> total_size_checked = 0;
  for (const auto& info : blob_item_infos) {
    if (!IsBytes(info.type())) {
      continue;
    }
    total_size_checked += info.length();
    ++memory_items;
  }

  if (!total_size_checked.IsValid()) {
    DVLOG(1) << "Impossible total size of all memory elements.";
    error_ = BlobAsyncTransportStrategy::ERROR_INVALID_PARAMS;
    return;
  }

  total_bytes_size_ = total_size_checked.ValueOrDie();

  // See if we have enough total space (disk + memory) to hold the data at all.
  if (total_bytes_size_ >
      disk_space_left + static_cast<uint64_t>(memory_available)) {
    error_ = BlobAsyncTransportStrategy::ERROR_TOO_LARGE;
    return;
  }
  // If the data is larger than the available memory, we go straight to disk.
  if (total_bytes_size_ > memory_available) {
    if (total_bytes_size_ > disk_space_left) {
      error_ = BlobAsyncTransportStrategy::ERROR_TOO_LARGE;
      return;
    }
    ComputeHandleSizes(total_bytes_size_, max_file_size, &handle_sizes_);
    FileStorageStrategy strategy(&requests_, builder_.get());
    ForEachWithSegment(blob_item_infos, static_cast<uint64_t>(max_file_size),

michaeln 2015/12/01 20:33:14:
  why is this cast needed, its of type size_t? and i

dmurph 2015/12/01 20:44:15:
  because it's waaaaay easier if we cast this to mat

                       &strategy);
    return;
  }

  if (total_bytes_size_ > max_ipc_memory_size) {
    if (total_bytes_size_ > std::numeric_limits<size_t>::max()) {
      DVLOG(1) << "Impossible total size of all memory elements.";
      error_ = BlobAsyncTransportStrategy::ERROR_INVALID_PARAMS;
      return;
    }
    ComputeHandleSizes(static_cast<size_t>(total_bytes_size_),

michaeln 2015/12/01 20:33:14:
  i dont think this cast is needed

dmurph 2015/12/01 20:44:15:
  with the below change I don't need it.

                       max_shared_memory_size, &handle_sizes_);
    SharedMemoryStorageStrategy strategy(max_shared_memory_size, &requests_,
                                         builder_.get());
    ForEachWithSegment(blob_item_infos, max_shared_memory_size, &strategy);
    return;
  }

  // Since they can all fit in IPC memory, we don't need to segment anything,
  // and just request them directly over IPC.
  size_t items_length = blob_item_infos.size();
  for (size_t i = 0; i < items_length; i++) {
    const auto& info = blob_item_infos.at(i);
    if (!IsBytes(info.type())) {
      builder_->AppendIPCDataElement(info);
      continue;
    }
    BlobAsyncTransportStrategy::RendererMemoryItemRequest request;
    request.browser_item_index = i;
    request.browser_item_offset = 0;
    request.message.request_number = requests_.size();
    request.message.transport_strategy = IPCBlobItemRequestStrategy::IPC;
    request.message.renderer_item_index = i;
    request.message.renderer_item_offset = 0;
    request.message.size = info.length();
    requests_.push_back(request);
    builder_->AppendFutureData(info.length());
  }
}

// Returns true when the blob can take the 'shortcut' path: every memory item
// already has its bytes present (no TYPE_BYTES_DESCRIPTION items) and the
// total byte size fits in the available memory.
/* static */
bool BlobAsyncTransportStrategy::ShouldBeShortcut(
    const std::vector<DataElement>& elements,
    size_t memory_available) {
  base::CheckedNumeric<size_t> shortcut_bytes = 0;
  for (const auto& element : elements) {
    DataElement::Type type = element.type();
    if (type == DataElement::TYPE_BYTES_DESCRIPTION) {
      return false;
    }
    if (type == DataElement::TYPE_BYTES) {
      shortcut_bytes += element.length();
      if (!shortcut_bytes.IsValid()) {
        return false;
      }
    }
  }
  return shortcut_bytes.ValueOrDie() <= memory_available;
}
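// For example (illustrative sizes): ComputeHandleSizes(13, 10, &sizes) fills
// |sizes| with {10, 3}: one full 10-byte segment plus an extra segment for
// the 3-byte remainder.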
/* static */
void BlobAsyncTransportStrategy::ComputeHandleSizes(
    size_t total_memory_size,

michaeln 2015/12/01 20:33:14:
  I think this first param should be uint64_t. Since

dmurph 2015/12/01 20:44:16:
  Done.

    size_t max_segment_size,
    std::vector<size_t>* segment_sizes) {
  size_t total_max_segments =
      static_cast<size_t>(total_memory_size / max_segment_size);
  bool has_extra_segment = (total_memory_size % max_segment_size) > 0;
  segment_sizes->reserve(total_max_segments + (has_extra_segment ? 1 : 0));
  segment_sizes->insert(segment_sizes->begin(), total_max_segments,
                        max_segment_size);
  if (has_extra_segment) {
    segment_sizes->push_back(total_memory_size % max_segment_size);
  }
}

}  // namespace storage