OLD | NEW |
---|---|
(Empty) | |
1 // Copyright 2015 The Chromium Authors. All rights reserved. | |
2 // Use of this source code is governed by a BSD-style license that can be | |
3 // found in the LICENSE file. | |
4 | |
#include <stdint.h>

#include <algorithm>
#include <limits>

#include "storage/browser/blob/blob_async_transport_strategy.h"
#include "storage/common/blob_storage/blob_storage_constants.h"
9 | |
10 namespace storage { | |
11 namespace { | |
12 bool IsBytes(DataElement::Type type) { | |
13 return type == DataElement::TYPE_BYTES || | |
14 type == DataElement::TYPE_BYTES_DESCRIPTION; | |
15 } | |
16 } // namespace | |
17 | |
18 // This class handles the logic of how transported memory is going to be | |
19 // represented as storage in the browser. The main idea is that all the memory | |
20 // is now packed into file chunks, and the browser items will just reference | |
21 // the file with offsets and sizes. | |
22 class FileStorageStrategy | |
23 : public BlobAsyncTransportStrategy::BlobSegmentVisitor<uint64_t> { | |
24 public: | |
25 typedef uint64_t SizeType; | |
26 | |
27 FileStorageStrategy( | |
28 std::vector<BlobAsyncTransportStrategy::RendererMemoryItemRequest>* | |
29 requests, | |
30 BlobDataBuilder* builder) | |
31 : requests(requests), builder(builder), current_item_index(0) {} | |
32 | |
33 ~FileStorageStrategy() override {} | |
34 | |
35 void VisitBytesSegment(size_t element_index, | |
36 SizeType element_offset, | |
37 size_t segment_index, | |
38 SizeType segment_offset, | |
39 SizeType size) override { | |
40 BlobAsyncTransportStrategy::RendererMemoryItemRequest request; | |
41 request.browser_item_index = current_item_index; | |
42 request.browser_item_offset = 0; | |
43 request.message.request_number = requests->size(); | |
44 request.message.transport_strategy = IPCBlobItemRequestStrategy::FILE; | |
45 request.message.renderer_item_index = element_index; | |
46 request.message.renderer_item_offset = element_offset; | |
47 request.message.size = size; | |
48 request.message.handle_index = segment_index; | |
49 request.message.handle_offset = segment_offset; | |
50 | |
51 requests->push_back(request); | |
52 builder->AppendFutureFile(segment_offset, size); | |
53 current_item_index++; | |
54 } | |
55 | |
56 void VisitNonBytesSegment(const DataElement& element, | |
57 size_t element_index) override { | |
58 builder->AppendIPCDataElement(element); | |
59 current_item_index++; | |
60 } | |
61 | |
62 void Done() override {} | |
63 | |
64 std::vector<BlobAsyncTransportStrategy::RendererMemoryItemRequest>* requests; | |
65 BlobDataBuilder* builder; | |
66 | |
67 size_t current_item_index; | |
68 }; | |
69 | |
70 // This class handles the logic of storing memory that is transported as | |
71 // consolidated shared memory. | |
72 class SharedMemoryStorageStrategy | |
73 : public BlobAsyncTransportStrategy::BlobSegmentVisitor<size_t> { | |
74 public: | |
75 typedef size_t SizeType; | |
76 | |
77 SharedMemoryStorageStrategy( | |
78 SizeType max_segment_size, | |
79 std::vector<BlobAsyncTransportStrategy::RendererMemoryItemRequest>* | |
80 requests, | |
81 BlobDataBuilder* builder) | |
82 : requests(requests), | |
83 max_segment_size(max_segment_size), | |
84 builder(builder), | |
85 current_item_size(0), | |
86 current_item_index(0) {} | |
87 ~SharedMemoryStorageStrategy() override {} | |
88 | |
89 void VisitBytesSegment(size_t element_index, | |
90 SizeType element_offset, | |
91 size_t segment_index, | |
92 SizeType segment_offset, | |
93 SizeType size) override { | |
94 if (current_item_size + size > max_segment_size) { | |
95 builder->AppendFutureData(current_item_size); | |
96 current_item_index++; | |
97 current_item_size = 0; | |
98 } | |
99 BlobAsyncTransportStrategy::RendererMemoryItemRequest request; | |
100 request.browser_item_index = current_item_index; | |
101 request.browser_item_offset = current_item_size; | |
102 request.message.request_number = requests->size(); | |
103 request.message.transport_strategy = | |
104 IPCBlobItemRequestStrategy::SHARED_MEMORY; | |
105 request.message.renderer_item_index = element_index; | |
106 request.message.renderer_item_offset = element_offset; | |
107 request.message.size = size; | |
108 request.message.handle_index = segment_index; | |
109 request.message.handle_offset = segment_offset; | |
110 | |
111 requests->push_back(request); | |
112 current_item_size += size; | |
113 }; | |
114 | |
115 void VisitNonBytesSegment(const DataElement& element, | |
116 size_t element_index) override { | |
117 if (current_item_size != 0) { | |
118 builder->AppendFutureData(current_item_size); | |
119 current_item_index++; | |
120 } | |
121 builder->AppendIPCDataElement(element); | |
122 current_item_index++; | |
123 current_item_size = 0; | |
124 } | |
125 | |
126 void Done() override { | |
127 if (current_item_size != 0) { | |
128 builder->AppendFutureData(current_item_size); | |
129 } | |
130 } | |
131 | |
132 std::vector<BlobAsyncTransportStrategy::RendererMemoryItemRequest>* requests; | |
133 | |
134 SizeType max_segment_size; | |
135 BlobDataBuilder* builder; | |
136 SizeType current_item_size; | |
137 size_t current_item_index; | |
138 }; | |
139 | |
140 BlobAsyncTransportStrategy::RendererMemoryItemRequest:: | |
141 RendererMemoryItemRequest() | |
142 : browser_item_index(0), browser_item_offset(0), received(false) {} | |
143 | |
144 BlobAsyncTransportStrategy::BlobAsyncTransportStrategy() | |
145 : error_(BlobAsyncTransportStrategy::ERROR_NONE), total_bytes_size_(0) {} | |
146 | |
147 BlobAsyncTransportStrategy::~BlobAsyncTransportStrategy() {} | |
148 | |
149 // if total_blob_size > kMaxBlobSize (say 400MB) | |
150 // Request all data in files | |
151 // (Segment all of the existing data into | |
152 // file blocks, of <= kMaxFileSize) | |
153 // else if total_blob_size > kMaxIPCSize (this is 150KB) | |
154 // Request all data in shared memory | |
155 // (Segment all of the existing data into | |
156 // shared memory blocks, of <= kMaxSharedMemorySize) | |
157 // else | |
158 // Request all data to be sent over IPC | |
159 void BlobAsyncTransportStrategy::Initialize( | |
160 size_t max_ipc_memory_size, | |
161 size_t max_shared_memory_size, | |
162 uint64_t max_file_size, | |
163 uint64_t disk_space_left, | |
164 size_t memory_available, | |
165 const std::string& uuid, | |
166 const std::vector<DataElement>& blob_item_infos) { | |
167 DCHECK(file_handle_sizes_.empty()); | |
168 DCHECK(shared_memory_handle_sizes_.empty()); | |
169 DCHECK(requests_.empty()); | |
170 DCHECK(!builder_.get()); | |
171 builder_.reset(new BlobDataBuilder(uuid)); | |
172 error_ = BlobAsyncTransportStrategy::ERROR_NONE; | |
173 | |
174 size_t memory_items = 0; | |
175 total_bytes_size_ = 0; | |
176 for (const auto& info : blob_item_infos) { | |
177 if (!IsBytes(info.type())) { | |
178 continue; | |
179 } | |
180 total_bytes_size_ += info.length(); | |
181 ++memory_items; | |
182 } | |
183 | |
184 // See if we have enough memory | |
kinuko
2015/11/25 16:08:17
nit: finish comment with period
dmurph
2015/11/25 21:16:30
Done.
| |
185 if (total_bytes_size_ > | |
186 disk_space_left + static_cast<uint64_t>(memory_available)) { | |
187 error_ = BlobAsyncTransportStrategy::ERROR_TOO_LARGE; | |
188 return; | |
189 } | |
190 | |
191 // If we're more than the available memory, then we're going straight to disk. | |
192 if (total_bytes_size_ > memory_available) { | |
193 if (total_bytes_size_ > disk_space_left) { | |
194 error_ = BlobAsyncTransportStrategy::ERROR_TOO_LARGE; | |
195 return; | |
196 } | |
197 ComputeHandleSizes(total_bytes_size_, max_file_size, &file_handle_sizes_); | |
198 FileStorageStrategy strategy(&requests_, builder_.get()); | |
199 ForEachWithSegment(blob_item_infos, max_file_size, &strategy); | |
200 return; | |
201 } | |
202 | |
203 if (total_bytes_size_ > max_ipc_memory_size) { | |
204 CHECK_LE(total_bytes_size_, std::numeric_limits<size_t>::max()); | |
205 ComputeHandleSizes(static_cast<size_t>(total_bytes_size_), | |
206 max_shared_memory_size, &shared_memory_handle_sizes_); | |
207 SharedMemoryStorageStrategy strategy(max_shared_memory_size, &requests_, | |
208 builder_.get()); | |
209 ForEachWithSegment(blob_item_infos, max_shared_memory_size, &strategy); | |
210 return; | |
211 } | |
212 | |
213 // Since they can all fit in IPC memory, we don't need to segment anything, | |
214 // and just request them straight in IPC. | |
215 size_t items_length = blob_item_infos.size(); | |
216 for (size_t i = 0; i < items_length; i++) { | |
217 const auto& info = blob_item_infos.at(i); | |
218 if (!IsBytes(info.type())) { | |
219 builder_->AppendIPCDataElement(info); | |
220 continue; | |
221 } | |
222 BlobAsyncTransportStrategy::RendererMemoryItemRequest request; | |
223 request.browser_item_index = i; | |
224 request.browser_item_offset = 0; | |
225 request.message.request_number = requests_.size(); | |
226 request.message.transport_strategy = IPCBlobItemRequestStrategy::IPC; | |
227 request.message.renderer_item_index = i; | |
228 request.message.renderer_item_offset = 0; | |
229 request.message.size = info.length(); | |
230 requests_.push_back(request); | |
231 builder_->AppendFutureData(info.length()); | |
232 } | |
233 } | |
234 | |
235 // Splits each |element| into one or more |segments| of a max_size, invokes the | |
236 // strategy to determine the request to make for each |segment| produced. A | |
237 // |segment| can also span multiple |elements|. | |
238 /* static */ | |
239 template <typename SizeType> | |
240 void BlobAsyncTransportStrategy::ForEachWithSegment( | |
241 const std::vector<DataElement>& elements, | |
242 SizeType max_segment_size, | |
243 BlobSegmentVisitor<SizeType>* visitor) { | |
244 DCHECK_GT(max_segment_size, 0ull); | |
245 size_t segment_index = 0; | |
246 SizeType segment_offset = 0; | |
247 size_t elements_length = elements.size(); | |
248 for (size_t element_index = 0; element_index < elements_length; | |
249 ++element_index) { | |
250 const auto& element = elements.at(element_index); | |
251 DataElement::Type type = element.type(); | |
252 if (!IsBytes(type)) { | |
253 visitor->VisitNonBytesSegment(element, element_index); | |
254 continue; | |
255 } | |
256 SizeType element_memory_left = element.length(); | |
257 SizeType element_offset = 0; | |
258 while (element_memory_left > 0) { | |
259 if (segment_offset == max_segment_size) { | |
260 ++segment_index; | |
261 segment_offset = 0; | |
262 } | |
263 SizeType memory_writing = | |
264 std::min(max_segment_size - segment_offset, element_memory_left); | |
265 visitor->VisitBytesSegment(element_index, element_offset, segment_index, | |
266 segment_offset, memory_writing); | |
267 element_memory_left -= memory_writing; | |
268 segment_offset += memory_writing; | |
269 element_offset += memory_writing; | |
270 } | |
271 } | |
272 visitor->Done(); | |
273 } | |
274 | |
275 /* static */ | |
276 bool BlobAsyncTransportStrategy::ShouldBeShortcut( | |
277 const std::vector<DataElement>& elements, | |
278 size_t memory_available) { | |
279 size_t shortcut_bytes = 0; | |
280 for (const auto& element : elements) { | |
281 DataElement::Type type = element.type(); | |
282 if (type == DataElement::TYPE_BYTES_DESCRIPTION) { | |
283 return false; | |
284 } | |
285 if (type == DataElement::TYPE_BYTES) { | |
286 shortcut_bytes += element.length(); | |
287 } | |
288 } | |
289 return shortcut_bytes <= memory_available; | |
290 } | |
291 | |
292 /* static */ | |
293 template <typename SizeType> | |
294 void BlobAsyncTransportStrategy::ComputeHandleSizes( | |
295 SizeType total_memory_size, | |
296 SizeType max_segment_size, | |
297 std::vector<SizeType>* segment_sizes) { | |
298 size_t total_max_segments = | |
299 static_cast<size_t>(total_memory_size / max_segment_size); | |
300 bool hasExtraSegment = (total_memory_size % max_segment_size) > 0; | |
301 segment_sizes->reserve(total_max_segments + (hasExtraSegment ? 1 : 0)); | |
302 segment_sizes->insert(segment_sizes->begin(), total_max_segments, | |
303 max_segment_size); | |
304 if (hasExtraSegment) { | |
305 segment_sizes->push_back(total_memory_size % max_segment_size); | |
306 } | |
307 } | |
308 | |
309 } // namespace storage | |
OLD | NEW |