// Copyright (c) 2012 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#include "net/disk_cache/blockfile/sparse_control.h"

#include <stdint.h>
#include <string.h>

#include <algorithm>
#include <memory>
#include <string>

#include "base/bind.h"
#include "base/format_macros.h"
#include "base/logging.h"
#include "base/macros.h"
#include "base/message_loop/message_loop.h"
#include "base/strings/string_util.h"
#include "base/strings/stringprintf.h"
#include "base/time/time.h"
#include "net/base/io_buffer.h"
#include "net/base/net_errors.h"
#include "net/disk_cache/blockfile/backend_impl.h"
#include "net/disk_cache/blockfile/entry_impl.h"
#include "net/disk_cache/blockfile/file.h"
#include "net/disk_cache/net_log_parameters.h"

using base::Time;

namespace {

// Stream of the sparse data index.
const int kSparseIndex = 2;

// Stream of the sparse data.
const int kSparseData = 1;

// We can have up to 64k children.
const int kMaxMapSize = 8 * 1024;

// The maximum number of bytes that a child can store.
const int kMaxEntrySize = 0x100000;

// The size of each data block (tracked by the child allocation bitmap).
const int kBlockSize = 1024;
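
// Note on the arithmetic: an 8 KB map holds 64K bits, one per child, and each
// child stores at most kMaxEntrySize (1 MB), which yields the 64 GB limit
// that StartIO() enforces. Inside a child, the allocation bitmap tracks
// kBlockSize (1 KB) blocks, so 1024 bits describe a full child.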

// Returns the name of a child entry given the base_name and signature of the
// parent and the child_id.
// If the entry is called entry_name, child entries will be named something
// like Range_entry_name:XXX:YYY where XXX is the entry signature and YYY is
// the number of the particular child.
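// For example, the third child of a parent entry named "entry_name" whose
// signature is 0x8d2 would be named "Range_entry_name:8d2:3" (the signature
// and the child number are both formatted in hex).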
std::string GenerateChildName(const std::string& base_name,
                              int64_t signature,
                              int64_t child_id) {
  return base::StringPrintf("Range_%s:%" PRIx64 ":%" PRIx64, base_name.c_str(),
                            signature, child_id);
}

// This class deletes the children of a sparse entry.
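// It stays alive through the extra reference taken by its creator (see
// SparseControl::DeleteChildren) and calls Release() on itself once every
// child recorded in the bitmap has been doomed, deleting one child per posted
// task so the work never blocks the current message loop.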
class ChildrenDeleter
    : public base::RefCounted<ChildrenDeleter>,
      public disk_cache::FileIOCallback {
 public:
  ChildrenDeleter(disk_cache::BackendImpl* backend, const std::string& name)
      : backend_(backend->GetWeakPtr()), name_(name), signature_(0) {}

  void OnFileIOComplete(int bytes_copied) override;

  // Two ways of deleting the children: if we have the children map, use
  // Start() directly, otherwise pass the data address to ReadData().
  void Start(char* buffer, int len);
  void ReadData(disk_cache::Addr address, int len);

 private:
  friend class base::RefCounted<ChildrenDeleter>;
  ~ChildrenDeleter() override {}

  void DeleteChildren();

  base::WeakPtr<disk_cache::BackendImpl> backend_;
  std::string name_;
  disk_cache::Bitmap children_map_;
  int64_t signature_;
  std::unique_ptr<char[]> buffer_;
  DISALLOW_COPY_AND_ASSIGN(ChildrenDeleter);
};

// This is the callback of the file operation.
void ChildrenDeleter::OnFileIOComplete(int bytes_copied) {
  char* buffer = buffer_.release();
  Start(buffer, bytes_copied);
}

void ChildrenDeleter::Start(char* buffer, int len) {
  buffer_.reset(buffer);
  if (len < static_cast<int>(sizeof(disk_cache::SparseData)))
    return Release();

  // Just copy the information from |buffer|, delete |buffer| and start
  // deleting the child entries.
  disk_cache::SparseData* data =
      reinterpret_cast<disk_cache::SparseData*>(buffer);
  signature_ = data->header.signature;

  int num_bits = (len - sizeof(disk_cache::SparseHeader)) * 8;
  children_map_.Resize(num_bits, false);
  children_map_.SetMap(data->bitmap, num_bits / 32);
  buffer_.reset();

  DeleteChildren();
}

void ChildrenDeleter::ReadData(disk_cache::Addr address, int len) {
  DCHECK(address.is_block_file());
  if (!backend_)
    return Release();

  disk_cache::File* file(backend_->File(address));
  if (!file)
    return Release();

  size_t file_offset = address.start_block() * address.BlockSize() +
                       disk_cache::kBlockHeaderSize;

  buffer_.reset(new char[len]);
  bool completed;
  if (!file->Read(buffer_.get(), len, file_offset, this, &completed))
    return Release();

  if (completed)
    OnFileIOComplete(len);

  // And wait until OnFileIOComplete gets called.
}

void ChildrenDeleter::DeleteChildren() {
  int child_id = 0;
  if (!children_map_.FindNextSetBit(&child_id) || !backend_) {
    // We are done. Just delete this object.
    return Release();
  }
  std::string child_name = GenerateChildName(name_, signature_, child_id);
  backend_->SyncDoomEntry(child_name);
  children_map_.Set(child_id, false);

  // Post a task to delete the next child.
  base::MessageLoop::current()->PostTask(
      FROM_HERE, base::Bind(&ChildrenDeleter::DeleteChildren, this));
}

// -----------------------------------------------------------------------

// Returns the NetLog event type corresponding to a SparseOperation.
net::NetLog::EventType GetSparseEventType(
    disk_cache::SparseControl::SparseOperation operation) {
  switch (operation) {
    case disk_cache::SparseControl::kReadOperation:
      return net::NetLog::TYPE_SPARSE_READ;
    case disk_cache::SparseControl::kWriteOperation:
      return net::NetLog::TYPE_SPARSE_WRITE;
    case disk_cache::SparseControl::kGetRangeOperation:
      return net::NetLog::TYPE_SPARSE_GET_RANGE;
    default:
      NOTREACHED();
      return net::NetLog::TYPE_CANCELLED;
  }
}

// Logs the end event for |operation| on a child entry. Range operations log
// no events for each child they search through.
void LogChildOperationEnd(const net::BoundNetLog& net_log,
                          disk_cache::SparseControl::SparseOperation operation,
                          int result) {
  if (net_log.IsCapturing()) {
    net::NetLog::EventType event_type;
    switch (operation) {
      case disk_cache::SparseControl::kReadOperation:
        event_type = net::NetLog::TYPE_SPARSE_READ_CHILD_DATA;
        break;
      case disk_cache::SparseControl::kWriteOperation:
        event_type = net::NetLog::TYPE_SPARSE_WRITE_CHILD_DATA;
        break;
      case disk_cache::SparseControl::kGetRangeOperation:
        return;
      default:
        NOTREACHED();
        return;
    }
    net_log.EndEventWithNetErrorCode(event_type, result);
  }
}

}  // namespace

namespace disk_cache {

SparseControl::SparseControl(EntryImpl* entry)
    : entry_(entry),
      child_(NULL),
      operation_(kNoOperation),
      pending_(false),
      finished_(false),
      init_(false),
      range_found_(false),
      abort_(false),
      child_map_(child_data_.bitmap, kNumSparseBits, kNumSparseBits / 32),
      offset_(0),
      buf_len_(0),
      child_offset_(0),
      child_len_(0),
      result_(0) {
  memset(&sparse_header_, 0, sizeof(sparse_header_));
  memset(&child_data_, 0, sizeof(child_data_));
}

SparseControl::~SparseControl() {
  if (child_)
    CloseChild();
  if (init_)
    WriteSparseData();
}

bool SparseControl::CouldBeSparse() const {
  DCHECK(!init_);

  if (entry_->GetDataSize(kSparseData))
    return false;

  // We don't verify the data, just see if it could be there.
  return (entry_->GetDataSize(kSparseIndex) != 0);
}

int SparseControl::StartIO(SparseOperation op,
                           int64_t offset,
                           net::IOBuffer* buf,
                           int buf_len,
                           const CompletionCallback& callback) {
  DCHECK(init_);
  // We don't support simultaneous IO for sparse data.
  if (operation_ != kNoOperation)
    return net::ERR_CACHE_OPERATION_NOT_SUPPORTED;

  if (offset < 0 || buf_len < 0)
    return net::ERR_INVALID_ARGUMENT;

  // We only support up to 64 GB.
  if (offset + buf_len >= 0x1000000000LL || offset + buf_len < 0)
    return net::ERR_CACHE_OPERATION_NOT_SUPPORTED;

  DCHECK(!user_buf_);
  DCHECK(user_callback_.is_null());

  if (!buf && (op == kReadOperation || op == kWriteOperation))
    return 0;

  // Copy the operation parameters.
  operation_ = op;
  offset_ = offset;
  user_buf_ = buf ? new net::DrainableIOBuffer(buf, buf_len) : NULL;
  buf_len_ = buf_len;
  user_callback_ = callback;

  result_ = 0;
  pending_ = false;
  finished_ = false;
  abort_ = false;

  if (entry_->net_log().IsCapturing()) {
    entry_->net_log().BeginEvent(
        GetSparseEventType(operation_),
        CreateNetLogSparseOperationCallback(offset_, buf_len_));
  }
  DoChildrenIO();

  if (!pending_) {
    // Everything was done synchronously.
    operation_ = kNoOperation;
    user_buf_ = NULL;
    user_callback_.Reset();
    return result_;
  }

  return net::ERR_IO_PENDING;
}

int SparseControl::GetAvailableRange(int64_t offset, int len, int64_t* start) {
  DCHECK(init_);
  // We don't support simultaneous IO for sparse data.
  if (operation_ != kNoOperation)
    return net::ERR_CACHE_OPERATION_NOT_SUPPORTED;

  DCHECK(start);

  range_found_ = false;
  int result = StartIO(
      kGetRangeOperation, offset, NULL, len, CompletionCallback());
  if (range_found_) {
    *start = offset_;
    return result;
  }

  // This is a failure. We want to return a valid start value in any case.
  *start = offset;
  return result < 0 ? result : 0;  // Don't mask error codes to the caller.
}

void SparseControl::CancelIO() {
  if (operation_ == kNoOperation)
    return;
  abort_ = true;
}

int SparseControl::ReadyToUse(const CompletionCallback& callback) {
  if (!abort_)
    return net::OK;

  // We'll grab another reference to keep this object alive because we just
  // have one extra reference due to the pending IO operation itself, but we'll
  // release that one before invoking user_callback_.
  entry_->AddRef();  // Balanced in DoAbortCallbacks.
  abort_callbacks_.push_back(callback);
  return net::ERR_IO_PENDING;
}

// Static
void SparseControl::DeleteChildren(EntryImpl* entry) {
  DCHECK(entry->GetEntryFlags() & PARENT_ENTRY);
  int data_len = entry->GetDataSize(kSparseIndex);
  if (data_len < static_cast<int>(sizeof(SparseData)) ||
      entry->GetDataSize(kSparseData))
    return;

  int map_len = data_len - sizeof(SparseHeader);
  if (map_len > kMaxMapSize || map_len % 4)
    return;

  char* buffer;
  Addr address;
  entry->GetData(kSparseIndex, &buffer, &address);
  if (!buffer && !address.is_initialized())
    return;

  entry->net_log().AddEvent(net::NetLog::TYPE_SPARSE_DELETE_CHILDREN);

  DCHECK(entry->backend_);
  ChildrenDeleter* deleter = new ChildrenDeleter(entry->backend_.get(),
                                                 entry->GetKey());
  // The object will self destruct when finished.
  deleter->AddRef();

  if (buffer) {
    base::MessageLoop::current()->PostTask(
        FROM_HERE,
        base::Bind(&ChildrenDeleter::Start, deleter, buffer, data_len));
  } else {
    base::MessageLoop::current()->PostTask(
        FROM_HERE,
        base::Bind(&ChildrenDeleter::ReadData, deleter, address, data_len));
  }
}

// -----------------------------------------------------------------------

int SparseControl::Init() {
  DCHECK(!init_);

  // We should not have sparse data for the exposed entry.
  if (entry_->GetDataSize(kSparseData))
    return net::ERR_CACHE_OPERATION_NOT_SUPPORTED;

  // Now see if there is something where we store our data.
  int rv = net::OK;
  int data_len = entry_->GetDataSize(kSparseIndex);
  if (!data_len) {
    rv = CreateSparseEntry();
  } else {
    rv = OpenSparseEntry(data_len);
  }

  if (rv == net::OK)
    init_ = true;
  return rv;
}

// We are going to start using this entry to store sparse data, so we have to
// initialize our control info.
int SparseControl::CreateSparseEntry() {
  if (CHILD_ENTRY & entry_->GetEntryFlags())
    return net::ERR_CACHE_OPERATION_NOT_SUPPORTED;

  memset(&sparse_header_, 0, sizeof(sparse_header_));
  sparse_header_.signature = Time::Now().ToInternalValue();
  sparse_header_.magic = kIndexMagic;
  sparse_header_.parent_key_len = entry_->GetKey().size();
  children_map_.Resize(kNumSparseBits, true);

  // Save the header. The bitmap is saved in the destructor.
  scoped_refptr<net::IOBuffer> buf(
      new net::WrappedIOBuffer(reinterpret_cast<char*>(&sparse_header_)));

  int rv = entry_->WriteData(kSparseIndex, 0, buf.get(), sizeof(sparse_header_),
                             CompletionCallback(), false);
  if (rv != sizeof(sparse_header_)) {
    DLOG(ERROR) << "Unable to save sparse_header_";
    return net::ERR_CACHE_OPERATION_NOT_SUPPORTED;
  }

  entry_->SetEntryFlags(PARENT_ENTRY);
  return net::OK;
}

// We are opening an entry from disk. Make sure that our control data is there.
int SparseControl::OpenSparseEntry(int data_len) {
  if (data_len < static_cast<int>(sizeof(SparseData)))
    return net::ERR_CACHE_OPERATION_NOT_SUPPORTED;

  if (entry_->GetDataSize(kSparseData))
    return net::ERR_CACHE_OPERATION_NOT_SUPPORTED;

  if (!(PARENT_ENTRY & entry_->GetEntryFlags()))
    return net::ERR_CACHE_OPERATION_NOT_SUPPORTED;

  // Don't go overboard with the bitmap. 8 KB gives us offsets up to 64 GB.
  int map_len = data_len - sizeof(sparse_header_);
  if (map_len > kMaxMapSize || map_len % 4)
    return net::ERR_CACHE_OPERATION_NOT_SUPPORTED;

  scoped_refptr<net::IOBuffer> buf(
      new net::WrappedIOBuffer(reinterpret_cast<char*>(&sparse_header_)));

  // Read header.
  int rv = entry_->ReadData(kSparseIndex, 0, buf.get(), sizeof(sparse_header_),
                            CompletionCallback());
  if (rv != static_cast<int>(sizeof(sparse_header_)))
    return net::ERR_CACHE_READ_FAILURE;

  // The real validation should be performed by the caller. This is just to
  // double check.
  if (sparse_header_.magic != kIndexMagic ||
      sparse_header_.parent_key_len !=
          static_cast<int>(entry_->GetKey().size()))
    return net::ERR_CACHE_OPERATION_NOT_SUPPORTED;

  // Read the actual bitmap.
  buf = new net::IOBuffer(map_len);
  rv = entry_->ReadData(kSparseIndex, sizeof(sparse_header_), buf.get(),
                        map_len, CompletionCallback());
  if (rv != map_len)
    return net::ERR_CACHE_READ_FAILURE;

  // Grow the bitmap to the current size and copy the bits.
  children_map_.Resize(map_len * 8, false);
  children_map_.SetMap(reinterpret_cast<uint32_t*>(buf->data()), map_len);
  return net::OK;
}

bool SparseControl::OpenChild() {
  DCHECK_GE(result_, 0);

  std::string key = GenerateChildKey();
  if (child_) {
    // Keep using the same child or open another one?
    if (key == child_->GetKey())
      return true;
    CloseChild();
  }

  // See if we are tracking this child.
  if (!ChildPresent())
    return ContinueWithoutChild(key);

  if (!entry_->backend_)
    return false;

  child_ = entry_->backend_->OpenEntryImpl(key);
  if (!child_)
    return ContinueWithoutChild(key);

  EntryImpl* child = static_cast<EntryImpl*>(child_);
  if (!(CHILD_ENTRY & child->GetEntryFlags()) ||
      child->GetDataSize(kSparseIndex) <
          static_cast<int>(sizeof(child_data_)))
    return KillChildAndContinue(key, false);

  scoped_refptr<net::WrappedIOBuffer> buf(
      new net::WrappedIOBuffer(reinterpret_cast<char*>(&child_data_)));

  // Read signature.
  int rv = child_->ReadData(kSparseIndex, 0, buf.get(), sizeof(child_data_),
                            CompletionCallback());
  if (rv != sizeof(child_data_))
    return KillChildAndContinue(key, true);  // This is a fatal failure.

  if (child_data_.header.signature != sparse_header_.signature ||
      child_data_.header.magic != kIndexMagic)
    return KillChildAndContinue(key, false);

  if (child_data_.header.last_block_len < 0 ||
      child_data_.header.last_block_len > kBlockSize) {
    // Make sure these values are always within range.
    child_data_.header.last_block_len = 0;
    child_data_.header.last_block = -1;
  }

  return true;
}

void SparseControl::CloseChild() {
  scoped_refptr<net::WrappedIOBuffer> buf(
      new net::WrappedIOBuffer(reinterpret_cast<char*>(&child_data_)));

  // Save the allocation bitmap before closing the child entry.
  int rv = child_->WriteData(kSparseIndex, 0, buf.get(), sizeof(child_data_),
                             CompletionCallback(), false);
  if (rv != sizeof(child_data_))
    DLOG(ERROR) << "Failed to save child data";
  child_->Release();
  child_ = NULL;
}

// We were not able to open this child; see what we can do.
bool SparseControl::ContinueWithoutChild(const std::string& key) {
  if (kReadOperation == operation_)
    return false;
  if (kGetRangeOperation == operation_)
    return true;

  if (!entry_->backend_)
    return false;

  child_ = entry_->backend_->CreateEntryImpl(key);
  if (!child_) {
    child_ = NULL;
    result_ = net::ERR_CACHE_READ_FAILURE;
    return false;
  }
  // Write signature.
  InitChildData();
  return true;
}

void SparseControl::WriteSparseData() {
  scoped_refptr<net::IOBuffer> buf(new net::WrappedIOBuffer(
      reinterpret_cast<const char*>(children_map_.GetMap())));

  int len = children_map_.ArraySize() * 4;
  int rv = entry_->WriteData(kSparseIndex, sizeof(sparse_header_), buf.get(),
                             len, CompletionCallback(), false);
  if (rv != len) {
    DLOG(ERROR) << "Unable to save sparse map";
  }
}

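// Performs one chunk of IO against the current child. Returns true when the
// chunk completed synchronously and there may be more work to do; returns
// false when the IO is pending, something failed, or the whole operation is
// done.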
bool SparseControl::DoChildIO() {
  finished_ = true;
  if (!buf_len_ || result_ < 0)
    return false;

  if (!OpenChild())
    return false;

  if (!VerifyRange())
    return false;

  // We have more work to do. Let's not trigger a callback to the caller.
  finished_ = false;
  CompletionCallback callback;
  if (!user_callback_.is_null()) {
    callback =
        base::Bind(&SparseControl::OnChildIOCompleted, base::Unretained(this));
  }

  int rv = 0;
  switch (operation_) {
    case kReadOperation:
      if (entry_->net_log().IsCapturing()) {
        entry_->net_log().BeginEvent(
            net::NetLog::TYPE_SPARSE_READ_CHILD_DATA,
            CreateNetLogSparseReadWriteCallback(child_->net_log().source(),
                                                child_len_));
      }
      rv = child_->ReadDataImpl(kSparseData, child_offset_, user_buf_.get(),
                                child_len_, callback);
      break;
    case kWriteOperation:
      if (entry_->net_log().IsCapturing()) {
        entry_->net_log().BeginEvent(
            net::NetLog::TYPE_SPARSE_WRITE_CHILD_DATA,
            CreateNetLogSparseReadWriteCallback(child_->net_log().source(),
                                                child_len_));
      }
      rv = child_->WriteDataImpl(kSparseData, child_offset_, user_buf_.get(),
                                 child_len_, callback, false);
      break;
    case kGetRangeOperation:
      rv = DoGetAvailableRange();
      break;
    default:
      NOTREACHED();
  }

  if (rv == net::ERR_IO_PENDING) {
    if (!pending_) {
      pending_ = true;
      // The child will protect itself against closing the entry while IO is
      // in progress. However, this entry can still be closed, and that would
      // not be a good thing for us, so we increase the refcount until we're
      // finished doing sparse stuff.
      entry_->AddRef();  // Balanced in DoUserCallback.
    }
    return false;
  }
  if (!rv)
    return false;

  DoChildIOCompleted(rv);
  return true;
}

void SparseControl::DoChildIOCompleted(int result) {
  LogChildOperationEnd(entry_->net_log(), operation_, result);
  if (result < 0) {
    // We fail the whole operation if we encounter an error.
    result_ = result;
    return;
  }

  UpdateRange(result);

  result_ += result;
  offset_ += result;
  buf_len_ -= result;

  // We'll be reusing the user provided buffer for the next chunk.
  if (buf_len_ && user_buf_)
    user_buf_->DidConsume(result);
}

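// Each child holds one kMaxEntrySize (1 MB) slice of the sparse data, so the
// child id is simply the current offset divided by 1 MB (offset_ >> 20).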
std::string SparseControl::GenerateChildKey() {
  return GenerateChildName(entry_->GetKey(), sparse_header_.signature,
                           offset_ >> 20);
}

// We are deleting the child because something went wrong.
bool SparseControl::KillChildAndContinue(const std::string& key, bool fatal) {
  SetChildBit(false);
  child_->DoomImpl();
  child_->Release();
  child_ = NULL;
  if (fatal) {
    result_ = net::ERR_CACHE_READ_FAILURE;
    return false;
  }
  return ContinueWithoutChild(key);
}

bool SparseControl::ChildPresent() {
  int child_bit = static_cast<int>(offset_ >> 20);
  if (children_map_.Size() <= child_bit)
    return false;

  return children_map_.Get(child_bit);
}

void SparseControl::SetChildBit(bool value) {
  int child_bit = static_cast<int>(offset_ >> 20);

  // We may have to increase the bitmap of child entries.
  if (children_map_.Size() <= child_bit)
    children_map_.Resize(Bitmap::RequiredArraySize(child_bit + 1) * 32, true);

  children_map_.Set(child_bit, value);
}

bool SparseControl::VerifyRange() {
  DCHECK_GE(result_, 0);

  child_offset_ = static_cast<int>(offset_) & (kMaxEntrySize - 1);
  child_len_ = std::min(buf_len_, kMaxEntrySize - child_offset_);

  // We can write to (or get info from) anywhere in this child.
  if (operation_ != kReadOperation)
    return true;

  // Check that there are no holes in this range.
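  // The child allocation bitmap works in kBlockSize (1 KB) units, hence the
  // >> 10 conversions from byte offsets to block indices below.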
  int last_bit = (child_offset_ + child_len_ + 1023) >> 10;
  int start = child_offset_ >> 10;
  if (child_map_.FindNextBit(&start, last_bit, false)) {
    // Something is not here.
    DCHECK_GE(child_data_.header.last_block_len, 0);
    DCHECK_LT(child_data_.header.last_block_len, kMaxEntrySize);
    int partial_block_len = PartialBlockLength(start);
    if (start == child_offset_ >> 10) {
      // It looks like we don't have anything.
      if (partial_block_len <= (child_offset_ & (kBlockSize - 1)))
        return false;
    }

    // We have the first part.
    child_len_ = (start << 10) - child_offset_;
    if (partial_block_len) {
      // We may have a few extra bytes.
      child_len_ = std::min(child_len_ + partial_block_len, buf_len_);
    }
    // There is no need to read more after this one.
    buf_len_ = child_len_;
  }
  return true;
}

void SparseControl::UpdateRange(int result) {
  if (result <= 0 || operation_ != kWriteOperation)
    return;

  DCHECK_GE(child_data_.header.last_block_len, 0);
  DCHECK_LT(child_data_.header.last_block_len, kMaxEntrySize);

  // Write the bitmap.
  int first_bit = child_offset_ >> 10;
  int block_offset = child_offset_ & (kBlockSize - 1);
  if (block_offset && (child_data_.header.last_block != first_bit ||
                       child_data_.header.last_block_len < block_offset)) {
    // The first block is not completely filled; ignore it.
    first_bit++;
  }

  int last_bit = (child_offset_ + result) >> 10;
  block_offset = (child_offset_ + result) & (kBlockSize - 1);

  // This condition will hit with the following criteria:
  // 1. The first byte doesn't follow the last write.
  // 2. The first byte is in the middle of a block.
  // 3. The first byte and the last byte are in the same block.
  if (first_bit > last_bit)
    return;

  if (block_offset && !child_map_.Get(last_bit)) {
    // The last block is not completely filled; save it for later.
    child_data_.header.last_block = last_bit;
    child_data_.header.last_block_len = block_offset;
  } else {
    child_data_.header.last_block = -1;
  }

  child_map_.SetRange(first_bit, last_bit, true);
}

int SparseControl::PartialBlockLength(int block_index) const {
  if (block_index == child_data_.header.last_block)
    return child_data_.header.last_block_len;

  // This may be the last stored index.
  int entry_len = child_->GetDataSize(kSparseData);
  if (block_index == entry_len >> 10)
    return entry_len & (kBlockSize - 1);

  // This is really empty.
  return 0;
}

void SparseControl::InitChildData() {
  // We know the real type of child_.
  EntryImpl* child = static_cast<EntryImpl*>(child_);
  child->SetEntryFlags(CHILD_ENTRY);

  memset(&child_data_, 0, sizeof(child_data_));
  child_data_.header = sparse_header_;

  scoped_refptr<net::WrappedIOBuffer> buf(
      new net::WrappedIOBuffer(reinterpret_cast<char*>(&child_data_)));

  int rv = child_->WriteData(kSparseIndex, 0, buf.get(), sizeof(child_data_),
                             CompletionCallback(), false);
  if (rv != sizeof(child_data_))
    DLOG(ERROR) << "Failed to save child data";
  SetChildBit(true);
}

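// Looks at the current child to see how much of the requested range is
// actually stored. Returns child_len_ when nothing is found here (so the scan
// moves on to the next child), or 0 after recording the number of available
// bytes in |result_| and clearing |buf_len_| to stop the scan.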
int SparseControl::DoGetAvailableRange() {
  if (!child_)
    return child_len_;  // Move on to the next child.

  // Check that there are no holes in this range.
  int last_bit = (child_offset_ + child_len_ + 1023) >> 10;
  int start = child_offset_ >> 10;
  int partial_start_bytes = PartialBlockLength(start);
  int found = start;
  int bits_found = child_map_.FindBits(&found, last_bit, true);

  // We don't care if there is a partial block in the middle of the range.
  int block_offset = child_offset_ & (kBlockSize - 1);
  if (!bits_found && partial_start_bytes <= block_offset)
    return child_len_;

  // We are done. Just break the loop and reset result_ to our real result.
  range_found_ = true;

  // |found| now points to the first 1. Let's see if we have zeros before it.
  int empty_start = std::max((found << 10) - child_offset_, 0);

  int bytes_found = bits_found << 10;
  bytes_found += PartialBlockLength(found + bits_found);

  if (start == found)
    bytes_found -= block_offset;

  // If the user is searching past the end of this child, bits_found is the
  // right result; otherwise, we have some empty space at the start of this
  // query that we have to subtract from the range that we searched.
  result_ = std::min(bytes_found, child_len_ - empty_start);

  if (!bits_found) {
    result_ = std::min(partial_start_bytes - block_offset, child_len_);
    empty_start = 0;
  }

  // Only update offset_ when this query found zeros at the start.
  if (empty_start)
    offset_ += empty_start;

  // This will actually break the loop.
  buf_len_ = 0;
  return 0;
}

void SparseControl::DoUserCallback() {
  DCHECK(!user_callback_.is_null());
  CompletionCallback cb = user_callback_;
  user_callback_.Reset();
  user_buf_ = NULL;
  pending_ = false;
  operation_ = kNoOperation;
  int rv = result_;
  entry_->Release();  // Don't touch object after this line.
  cb.Run(rv);
}

void SparseControl::DoAbortCallbacks() {
  for (size_t i = 0; i < abort_callbacks_.size(); i++) {
    // Releasing all references to entry_ may result in the destruction of
    // this object so we should not be touching it after the last Release().
    CompletionCallback cb = abort_callbacks_[i];
    if (i == abort_callbacks_.size() - 1)
      abort_callbacks_.clear();

    entry_->Release();  // Don't touch object after this line.
    cb.Run(net::OK);
  }
}

void SparseControl::OnChildIOCompleted(int result) {
  DCHECK_NE(net::ERR_IO_PENDING, result);
  DoChildIOCompleted(result);

  if (abort_) {
    // We'll return the current result of the operation, which may be less
    // than the bytes to read or write, but the user cancelled the operation.
    abort_ = false;
    if (entry_->net_log().IsCapturing()) {
      entry_->net_log().AddEvent(net::NetLog::TYPE_CANCELLED);
      entry_->net_log().EndEvent(GetSparseEventType(operation_));
    }
    // We have an indirect reference to this object for every callback so if
    // there is only one callback, we may delete this object before reaching
    // DoAbortCallbacks.
    bool has_abort_callbacks = !abort_callbacks_.empty();
    DoUserCallback();
    if (has_abort_callbacks)
      DoAbortCallbacks();
    return;
  }

  // We are running a callback from the message loop. It's time to restart
  // what we were doing before.
  DoChildrenIO();
}

}  // namespace disk_cache