Chromium Code Reviews

Side by Side Diff: storage/browser/blob/blob_async_builder_host.cc

Issue 1234813004: [BlobAsync] Asynchronous Blob Construction Final Patch (Closed)
Base URL: https://chromium.googlesource.com/chromium/src.git@blob-protocol-change
Patch Set: "comments" (created 4 years, 9 months ago)
File contents after this patch:

// Copyright 2015 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#include "storage/browser/blob/blob_async_builder_host.h"

#include <stddef.h>
#include <stdint.h>

#include <utility>

#include "base/bind.h"
#include "base/memory/shared_memory.h"
#include "storage/browser/blob/blob_data_handle.h"
#include "storage/browser/blob/blob_storage_context.h"

namespace storage {
namespace {
bool CalculateBlobMemorySize(const std::vector<DataElement>& elements,
                             size_t* shortcut_bytes,
                             uint64_t* total_bytes) {
  DCHECK(shortcut_bytes);
  DCHECK(total_bytes);
  base::CheckedNumeric<uint64_t> total_size_checked = 0;
  base::CheckedNumeric<size_t> shortcut_size_checked = 0;
  for (const auto& e : elements) {
    if (e.type() == DataElement::TYPE_BYTES) {
      total_size_checked += e.length();
      shortcut_size_checked += e.length();
    } else if (e.type() == DataElement::TYPE_BYTES_DESCRIPTION) {
      total_size_checked += e.length();
    } else {
      continue;
    }
    if (!total_size_checked.IsValid() || !shortcut_size_checked.IsValid()) {
      return false;
    }
  }
  *shortcut_bytes = shortcut_size_checked.ValueOrDie();
  *total_bytes = total_size_checked.ValueOrDie();
  return true;
}
}  // namespace

using MemoryItemRequest =
    BlobAsyncTransportRequestBuilder::RendererMemoryItemRequest;

BlobAsyncBuilderHost::BlobBuildingState::BlobBuildingState(
    const std::string& uuid,
    std::set<std::string> referenced_blob_uuids,
    std::vector<scoped_ptr<BlobDataHandle>>* referenced_blob_handles)
    : data_builder(uuid),
      referenced_blob_uuids(referenced_blob_uuids),
      referenced_blob_handles(std::move(*referenced_blob_handles)) {}

BlobAsyncBuilderHost::BlobBuildingState::~BlobBuildingState() {}

BlobAsyncBuilderHost::BlobAsyncBuilderHost() : ptr_factory_(this) {}

BlobAsyncBuilderHost::~BlobAsyncBuilderHost() {}

BlobTransportResult BlobAsyncBuilderHost::RegisterBlobUUID(
    const std::string& uuid,
    const std::string& content_type,
    const std::string& content_disposition,
    const std::set<std::string>& referenced_blob_uuids,
    BlobStorageContext* context) {
  if (async_blob_map_.find(uuid) != async_blob_map_.end())
    return BlobTransportResult::BAD_IPC;
  if (referenced_blob_uuids.find(uuid) != referenced_blob_uuids.end())
    return BlobTransportResult::BAD_IPC;
  context->CreatePendingBlob(uuid, content_type, content_disposition);
  std::vector<scoped_ptr<BlobDataHandle>> handles;
  for (const std::string& referenced_uuid : referenced_blob_uuids) {
    scoped_ptr<BlobDataHandle> handle =
        context->GetBlobDataFromUUID(referenced_uuid);
    if (!handle || handle->IsBroken()) {
      // We cancel the blob right away, and don't bother storing our state.
      context->CancelPendingBlob(
          uuid, IPCBlobCreationCancelCode::REFERENCED_BLOB_BROKEN);
      return BlobTransportResult::CANCEL_REFERENCED_BLOB_BROKEN;
    }
    handles.emplace_back(std::move(handle));
  }
  async_blob_map_[uuid] = make_scoped_ptr(
      new BlobBuildingState(uuid, referenced_blob_uuids, &handles));
  return BlobTransportResult::DONE;
}

BlobTransportResult BlobAsyncBuilderHost::StartBuildingBlob(
    const std::string& uuid,
    const std::vector<DataElement>& elements,
    size_t memory_available,
    BlobStorageContext* context,
    const RequestMemoryCallback& request_memory) {
  DCHECK(context);
  if (async_blob_map_.find(uuid) == async_blob_map_.end()) {
    return BlobTransportResult::BAD_IPC;
  }

  // Step 1: Get the sizes.
  size_t shortcut_memory_size_bytes = 0;
  uint64_t total_memory_size_bytes = 0;
  if (!CalculateBlobMemorySize(elements, &shortcut_memory_size_bytes,
                               &total_memory_size_bytes)) {
    CancelBuildingBlob(uuid, IPCBlobCreationCancelCode::UNKNOWN, context);
    return BlobTransportResult::BAD_IPC;
  }

  // Step 2: Check if we have enough memory to store the blob.
  if (total_memory_size_bytes > memory_available) {
    CancelBuildingBlob(uuid, IPCBlobCreationCancelCode::OUT_OF_MEMORY, context);
    return BlobTransportResult::CANCEL_MEMORY_FULL;
  }

  // From here on, we know we can fit the blob in memory.
  BlobBuildingState* state_ptr = async_blob_map_[uuid].get();
  if (!state_ptr->request_builder.requests().empty()) {
    // Check that we're not a duplicate call.
    return BlobTransportResult::BAD_IPC;
  }
  state_ptr->request_memory_callback = request_memory;

  // Step 3: Check to make sure the referenced blob information we received
  // earlier is correct:
  std::set<std::string> extracted_blob_uuids;
  for (const DataElement& e : elements) {
    if (e.type() == DataElement::TYPE_BLOB) {
      extracted_blob_uuids.insert(e.blob_uuid());
      // We can't depend on ourselves.
      if (e.blob_uuid() == uuid) {
        CancelBuildingBlob(uuid, IPCBlobCreationCancelCode::UNKNOWN, context);
        return BlobTransportResult::BAD_IPC;
      }
    }
  }
  if (extracted_blob_uuids != state_ptr->referenced_blob_uuids) {
    CancelBuildingBlob(uuid, IPCBlobCreationCancelCode::UNKNOWN, context);
    return BlobTransportResult::BAD_IPC;
  }

  // Step 4: Decide if we're using the shortcut method. This will also catch
  // the case where we don't have any memory items.
  if (shortcut_memory_size_bytes == total_memory_size_bytes &&
      shortcut_memory_size_bytes <= memory_available) {
    for (const DataElement& e : elements) {
      state_ptr->data_builder.AppendIPCDataElement(e);
    }
    FinishBuildingBlob(state_ptr, context);
    return BlobTransportResult::DONE;
  }

  // From here on, we know the blob's size is less than |memory_available|,
  // so we know we're < max(size_t).
  // Step 5: Decide if we're using shared memory.
  if (total_memory_size_bytes > max_ipc_memory_size_) {
    state_ptr->request_builder.InitializeForSharedMemoryRequests(
        max_shared_memory_size_, total_memory_size_bytes, elements,
        &(state_ptr->data_builder));
  } else {
    // Step 6: We can fit in IPC.
    state_ptr->request_builder.InitializeForIPCRequests(
        max_ipc_memory_size_, total_memory_size_bytes, elements,
        &(state_ptr->data_builder));
  }
  // We initialize our requests received state now that they are populated.
  state_ptr->request_received.resize(
      state_ptr->request_builder.requests().size(), false);
  return ContinueBlobMemoryRequests(uuid, context);
}

BlobTransportResult BlobAsyncBuilderHost::OnMemoryResponses(
    const std::string& uuid,
    const std::vector<BlobItemBytesResponse>& responses,
    BlobStorageContext* context) {
  AsyncBlobMap::const_iterator state_it = async_blob_map_.find(uuid);
  if (state_it == async_blob_map_.end()) {
    DVLOG(1) << "Could not find blob " << uuid;
    return BlobTransportResult::BAD_IPC;
  }
  if (responses.empty()) {
    CancelBuildingBlob(uuid, IPCBlobCreationCancelCode::UNKNOWN, context);
    return BlobTransportResult::BAD_IPC;
  }
  BlobAsyncBuilderHost::BlobBuildingState* state = state_it->second.get();
  BlobAsyncTransportRequestBuilder& request_builder = state->request_builder;
  const auto& requests = request_builder.requests();
  for (const BlobItemBytesResponse& response : responses) {
    if (response.request_number >= requests.size()) {
      // Bad IPC, so we delete our record and ignore.
      DVLOG(1) << "Invalid request number " << response.request_number;
      CancelBuildingBlob(uuid, IPCBlobCreationCancelCode::UNKNOWN, context);
      return BlobTransportResult::BAD_IPC;
    }
    DCHECK_LT(response.request_number, state->request_received.size());
    const MemoryItemRequest& request = requests[response.request_number];
    if (state->request_received[response.request_number]) {
      // Bad IPC, so we delete our record.
      DVLOG(1) << "Already received response for that request.";
      CancelBuildingBlob(uuid, IPCBlobCreationCancelCode::UNKNOWN, context);
      return BlobTransportResult::BAD_IPC;
    }
    state->request_received[response.request_number] = true;
    bool invalid_ipc = false;
    bool memory_error = false;
    switch (request.message.transport_strategy) {
      case IPCBlobItemRequestStrategy::IPC:
        if (response.inline_data.size() < request.message.size) {
          DVLOG(1) << "Invalid data size " << response.inline_data.size()
                   << " vs requested size of " << request.message.size;
          invalid_ipc = true;
          break;
        }
        invalid_ipc = !state->data_builder.PopulateFutureData(
            request.browser_item_index, &response.inline_data[0],
            request.browser_item_offset, request.message.size);
        break;
      case IPCBlobItemRequestStrategy::SHARED_MEMORY:
        if (state->num_shared_memory_requests == 0) {
          DVLOG(1) << "Received too many responses for shared memory.";
          invalid_ipc = true;
          break;
        }
        state->num_shared_memory_requests--;
        if (!state->shared_memory_block->memory()) {
          // We just map the whole block, as we'll probably be accessing the
          // whole thing in this group of responses. Another option is to use
          // MapAt, remove the mapped boolean, and then exclude the
          // handle_offset below.
          size_t handle_size = request_builder.shared_memory_sizes()
              [state->current_shared_memory_handle_index];
          if (!state->shared_memory_block->Map(handle_size)) {
            DVLOG(1) << "Unable to map memory to size " << handle_size;
            memory_error = true;
            break;
          }
        }

        invalid_ipc = !state->data_builder.PopulateFutureData(
            request.browser_item_index,
            static_cast<const char*>(state->shared_memory_block->memory()) +
                request.message.handle_offset,
            request.browser_item_offset, request.message.size);
        break;
      case IPCBlobItemRequestStrategy::FILE:
      case IPCBlobItemRequestStrategy::UNKNOWN:
        DVLOG(1) << "Not implemented.";
        invalid_ipc = true;
        break;
    }
    if (invalid_ipc) {
      // Bad IPC, so we delete our record and return false.
      CancelBuildingBlob(uuid, IPCBlobCreationCancelCode::UNKNOWN, context);
      return BlobTransportResult::BAD_IPC;
    }
    if (memory_error) {
      DVLOG(1) << "Shared memory error.";
      CancelBuildingBlob(uuid, IPCBlobCreationCancelCode::OUT_OF_MEMORY,
                         context);
      return BlobTransportResult::CANCEL_MEMORY_FULL;
    }
    state->num_fulfilled_requests++;
  }
  return ContinueBlobMemoryRequests(uuid, context);
}

void BlobAsyncBuilderHost::CancelBuildingBlob(const std::string& uuid,
                                              IPCBlobCreationCancelCode code,
                                              BlobStorageContext* context) {
  DCHECK(context);
  auto state_it = async_blob_map_.find(uuid);
  if (state_it == async_blob_map_.end()) {
    return;
  }
  // We can have the blob dereferenced by the renderer, but have it still being
  // 'built'. In this case, it's destructed in the context, but we still have
  // it in our map. Hence we make sure the context has the entry before
  // calling cancel.
  if (context->registry().HasEntry(uuid))
    context->CancelPendingBlob(uuid, code);
  async_blob_map_.erase(state_it);
}

void BlobAsyncBuilderHost::CancelAll(BlobStorageContext* context) {
  DCHECK(context);
  // Some of our pending blobs may still be referenced elsewhere.
  std::vector<scoped_ptr<BlobDataHandle>> referenced_pending_blobs;
  for (const auto& uuid_state_pair : async_blob_map_) {
    if (context->IsBeingBuilt(uuid_state_pair.first)) {
      referenced_pending_blobs.emplace_back(
          context->GetBlobDataFromUUID(uuid_state_pair.first));
    }
  }
  // We clear the map before canceling them to prevent any strange reentry into
  // our class (see ReferencedBlobFinished) if any blobs were waiting for others
  // to construct.
  async_blob_map_.clear();
  for (const scoped_ptr<BlobDataHandle>& handle : referenced_pending_blobs) {
    context->CancelPendingBlob(
        handle->uuid(), IPCBlobCreationCancelCode::SOURCE_DIED_IN_TRANSIT);
  }
}

BlobTransportResult BlobAsyncBuilderHost::ContinueBlobMemoryRequests(
    const std::string& uuid,
    BlobStorageContext* context) {
  AsyncBlobMap::const_iterator state_it = async_blob_map_.find(uuid);
  DCHECK(state_it != async_blob_map_.end());
  BlobAsyncBuilderHost::BlobBuildingState* state = state_it->second.get();

  BlobAsyncTransportRequestBuilder& request_builder = state->request_builder;
  const std::vector<MemoryItemRequest>& requests = request_builder.requests();
  size_t num_requests = requests.size();
  if (state->num_fulfilled_requests == num_requests) {
    FinishBuildingBlob(state, context);
    return BlobTransportResult::DONE;
  }
  DCHECK_LT(state->num_fulfilled_requests, num_requests);
  if (state->next_request == num_requests) {
    // We are still waiting on other requests to come back.
    return BlobTransportResult::PENDING_RESPONSES;
  }

  scoped_ptr<std::vector<BlobItemBytesRequest>> byte_requests(
      new std::vector<BlobItemBytesRequest>());
  scoped_ptr<std::vector<base::SharedMemoryHandle>> shared_memory(
      new std::vector<base::SharedMemoryHandle>());

  for (; state->next_request < num_requests; ++state->next_request) {
    const MemoryItemRequest& request = requests[state->next_request];

    bool stop_accumulating = false;
    bool using_shared_memory_handle = state->num_shared_memory_requests > 0;
    switch (request.message.transport_strategy) {
      case IPCBlobItemRequestStrategy::IPC:
        byte_requests->push_back(request.message);
        break;
      case IPCBlobItemRequestStrategy::SHARED_MEMORY:
        if (using_shared_memory_handle &&
            state->current_shared_memory_handle_index !=
                request.message.handle_index) {
          // We only want one shared memory per requesting blob.
          stop_accumulating = true;
          break;
        }
        using_shared_memory_handle = true;
        state->current_shared_memory_handle_index =
            request.message.handle_index;
        state->num_shared_memory_requests++;

        if (!state->shared_memory_block) {
          state->shared_memory_block.reset(new base::SharedMemory());
          size_t size =
              request_builder
                  .shared_memory_sizes()[request.message.handle_index];
          if (!state->shared_memory_block->CreateAnonymous(size)) {
            DVLOG(1) << "Unable to allocate shared memory for blob transfer.";
            return BlobTransportResult::CANCEL_MEMORY_FULL;
          }
        }
        shared_memory->push_back(state->shared_memory_block->handle());
        byte_requests->push_back(request.message);
        // Since we are only using one handle at a time, transform our handle
        // index correctly back to 0.
        byte_requests->back().handle_index = 0;
        break;
      case IPCBlobItemRequestStrategy::FILE:
      case IPCBlobItemRequestStrategy::UNKNOWN:
        NOTREACHED() << "Not implemented yet.";
        break;
    }
    if (stop_accumulating) {
      break;
    }
  }
  DCHECK(!requests.empty());

  state->request_memory_callback.Run(
      std::move(byte_requests), std::move(shared_memory),
      make_scoped_ptr(new std::vector<base::File>()));
  return BlobTransportResult::PENDING_RESPONSES;
}

void BlobAsyncBuilderHost::ReferencedBlobFinished(
    const std::string& owning_blob_uuid,
    base::WeakPtr<BlobStorageContext> context,
    bool construction_success) {
  if (!context) {
    return;
  }
  auto state_it = async_blob_map_.find(owning_blob_uuid);
  if (state_it == async_blob_map_.end()) {
    return;
  }
  if (!construction_success) {
    CancelBuildingBlob(owning_blob_uuid,
                       IPCBlobCreationCancelCode::SOURCE_DIED_IN_TRANSIT,
                       context.get());
    return;
  }
  BlobBuildingState* state = state_it->second.get();
  DCHECK_GT(state->num_referenced_blobs_building, 0u);
  if (--state->num_referenced_blobs_building == 0) {
    context->CompletePendingBlob(state->data_builder);
    async_blob_map_.erase(state->data_builder.uuid());
  }
}

void BlobAsyncBuilderHost::FinishBuildingBlob(BlobBuildingState* state,
                                              BlobStorageContext* context) {
  if (!state->referenced_blob_uuids.empty()) {
    DCHECK_EQ(0u, state->num_referenced_blobs_building);
    state->num_referenced_blobs_building = 0;
    // We assume re-entry is not possible, as RunOnConstructionComplete
    // will schedule a task when the blob is being built. Thus we can't have the
    // case where |num_referenced_blobs_building| reaches 0 in the
    // ReferencedBlobFinished method before we're finished looping.
    for (const std::string& referenced_uuid : state->referenced_blob_uuids) {
      if (context->IsBeingBuilt(referenced_uuid)) {
        state->num_referenced_blobs_building++;
        context->RunOnConstructionComplete(
            referenced_uuid,
            base::Bind(&BlobAsyncBuilderHost::ReferencedBlobFinished,
                       ptr_factory_.GetWeakPtr(), state->data_builder.uuid(),
                       context->AsWeakPtr()));
      }
    }
    if (state->num_referenced_blobs_building > 0) {
      // We wait until referenced blobs are done.
      return;
    }
  }
  context->CompletePendingBlob(state->data_builder);
  async_blob_map_.erase(state->data_builder.uuid());
}

}  // namespace storage
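
For readers following the new control flow: the sketch below shows roughly how a browser-side caller might drive BlobAsyncBuilderHost after this patch, now that RegisterBlobUUID, StartBuildingBlob, and OnMemoryResponses return a BlobTransportResult instead of a bool. The dispatcher class and its helpers (HandleResult, SendMemoryRequests, ShutdownForBadIPC, SendCancelMessage, SendDoneMessage, blob_storage_context(), memory_available()) are hypothetical stand-ins, not part of this change; only the BlobAsyncBuilderHost calls and the BlobTransportResult values come from the file above. Class declarations and includes are omitted.

// Hypothetical caller sketch; not part of this patch.
void HypotheticalBlobDispatcher::OnRegisterBlob(
    const std::string& uuid,
    const std::string& content_type,
    const std::string& content_disposition,
    const std::set<std::string>& referenced_blob_uuids) {
  HandleResult(uuid, builder_host_.RegisterBlobUUID(
                         uuid, content_type, content_disposition,
                         referenced_blob_uuids, blob_storage_context()));
}

void HypotheticalBlobDispatcher::OnStartBuildingBlob(
    const std::string& uuid,
    const std::vector<DataElement>& elements) {
  // SendMemoryRequests (hypothetical) would forward the byte requests and
  // shared memory handles produced by ContinueBlobMemoryRequests to the
  // renderer.
  HandleResult(uuid, builder_host_.StartBuildingBlob(
                         uuid, elements, memory_available(),
                         blob_storage_context(),
                         base::Bind(&HypotheticalBlobDispatcher::SendMemoryRequests,
                                    base::Unretained(this), uuid)));
}

void HypotheticalBlobDispatcher::OnMemoryResponses(
    const std::string& uuid,
    const std::vector<BlobItemBytesResponse>& responses) {
  HandleResult(uuid, builder_host_.OnMemoryResponses(uuid, responses,
                                                     blob_storage_context()));
}

void HypotheticalBlobDispatcher::HandleResult(const std::string& uuid,
                                              BlobTransportResult result) {
  switch (result) {
    case BlobTransportResult::BAD_IPC:
      // A malformed or duplicate message; sever the connection to the
      // offending renderer.
      ShutdownForBadIPC();
      break;
    case BlobTransportResult::CANCEL_MEMORY_FULL:
    case BlobTransportResult::CANCEL_REFERENCED_BLOB_BROKEN:
      // Memory ran out or a referenced blob was broken; tell the renderer
      // the blob was cancelled.
      SendCancelMessage(uuid, result);
      break;
    case BlobTransportResult::PENDING_RESPONSES:
      // The host has already run the request-memory callback with the next
      // batch of BlobItemBytesRequests; nothing more to do yet.
      break;
    case BlobTransportResult::DONE:
      SendDoneMessage(uuid);
      break;
    default:
      break;
  }
}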