// Copyright (c) 2012 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#include "net/disk_cache/memory/mem_entry_impl.h"

#include "base/bind.h"
#include "base/logging.h"
#include "base/strings/stringprintf.h"
#include "base/values.h"
#include "net/base/io_buffer.h"
#include "net/base/net_errors.h"
#include "net/disk_cache/memory/mem_backend_impl.h"
#include "net/disk_cache/net_log_parameters.h"

using base::Time;

namespace {

const int kSparseData = 1;

// The maximum size of a sparse entry is 2 to the power of this number.
const int kMaxSparseEntryBits = 12;

// A sparse entry has a maximum size of 4KB.
const int kMaxSparseEntrySize = 1 << kMaxSparseEntryBits;

// Convert global offset to child index.
inline int ToChildIndex(int64 offset) {
  return static_cast<int>(offset >> kMaxSparseEntryBits);
}

// Convert global offset to offset in child entry.
inline int ToChildOffset(int64 offset) {
  return static_cast<int>(offset & (kMaxSparseEntrySize - 1));
}
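
// A worked example of the helpers above: with kMaxSparseEntryBits == 12, each
// child covers 4096 bytes, so a global offset of 9029 maps to child index 2
// (9029 >> 12) and child offset 837 (9029 & 4095).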

// Returns a name for a child entry given the base_name of the parent and the
// child_id. This name is only used for logging purposes.
// If the entry is called entry_name, child entries will be named something
// like Range_entry_name:YYY where YYY is the number of the particular child.
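// For example, GenerateChildName("foo", 3) returns "Range_foo:3".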
std::string GenerateChildName(const std::string& base_name, int child_id) {
  return base::StringPrintf("Range_%s:%i", base_name.c_str(), child_id);
}

// Returns NetLog parameters for the creation of a child MemEntryImpl. A
// separate function is needed because child entries don't support GetKey().
base::Value* NetLogChildEntryCreationCallback(
    const disk_cache::MemEntryImpl* parent,
    int child_id,
    net::NetLog::LogLevel /* log_level */) {
  base::DictionaryValue* dict = new base::DictionaryValue();
  dict->SetString("key", GenerateChildName(parent->GetKey(), child_id));
  dict->SetBoolean("created", true);
  return dict;
}

}  // namespace

namespace disk_cache {

MemEntryImpl::MemEntryImpl(MemBackendImpl* backend) {
  doomed_ = false;
  backend_ = backend;
  ref_count_ = 0;
  parent_ = NULL;
  child_id_ = 0;
  child_first_pos_ = 0;
  next_ = NULL;
  prev_ = NULL;
  for (int i = 0; i < NUM_STREAMS; i++)
    data_size_[i] = 0;
}

// ------------------------------------------------------------------------

bool MemEntryImpl::CreateEntry(const std::string& key, net::NetLog* net_log) {
  key_ = key;
  Time current = Time::Now();
  last_modified_ = current;
  last_used_ = current;

  net_log_ = net::BoundNetLog::Make(net_log,
                                    net::NetLog::SOURCE_MEMORY_CACHE_ENTRY);
  // Must be called after |key_| is set, so GetKey() works.
  net_log_.BeginEvent(
      net::NetLog::TYPE_DISK_CACHE_MEM_ENTRY_IMPL,
      CreateNetLogEntryCreationCallback(this, true));

  Open();
  backend_->ModifyStorageSize(0, static_cast<int32>(key.size()));
  return true;
}

void MemEntryImpl::InternalDoom() {
  net_log_.AddEvent(net::NetLog::TYPE_ENTRY_DOOM);
  doomed_ = true;
  if (!ref_count_) {
    if (type() == kParentEntry) {
      // If this is a parent entry, we need to doom all the child entries.
      if (children_.get()) {
        EntryMap children;
        children.swap(*children_);
        for (EntryMap::iterator i = children.begin();
             i != children.end(); ++i) {
          // Since a pointer to this object is also saved in the map, avoid
          // dooming it.
          if (i->second != this)
            i->second->Doom();
        }
        DCHECK(children_->empty());
      }
    } else {
      // If this is a child entry, detach it from the parent.
      parent_->DetachChild(child_id_);
    }
    delete this;
  }
}

void MemEntryImpl::Open() {
  // Only a parent entry can be opened.
  // TODO(hclam): make sure it's correct to not apply the concept of ref
  // counting to child entry.
  DCHECK(type() == kParentEntry);
  ref_count_++;
  DCHECK_GE(ref_count_, 0);
  DCHECK(!doomed_);
}

bool MemEntryImpl::InUse() {
  if (type() == kParentEntry) {
    return ref_count_ > 0;
  } else {
    // A child entry is never considered in use. As a consequence, a child
    // entry can always be evicted, even while the associated parent entry is
    // currently in use (i.e. opened).
    return false;
  }
}

// ------------------------------------------------------------------------

void MemEntryImpl::Doom() {
  if (doomed_)
    return;
  if (type() == kParentEntry) {
    // Perform internal doom from the backend if this is a parent entry.
    backend_->InternalDoomEntry(this);
  } else {
    // Manually detach from the backend and perform internal doom.
    backend_->RemoveFromRankingList(this);
    InternalDoom();
  }
}

void MemEntryImpl::Close() {
  // Only a parent entry can be closed.
  DCHECK(type() == kParentEntry);
  ref_count_--;
  DCHECK_GE(ref_count_, 0);
  if (!ref_count_ && doomed_)
    InternalDoom();
}

std::string MemEntryImpl::GetKey() const {
  // A child entry doesn't have a key, so this method should not be called.
  DCHECK(type() == kParentEntry);
  return key_;
}

Time MemEntryImpl::GetLastUsed() const {
  return last_used_;
}

Time MemEntryImpl::GetLastModified() const {
  return last_modified_;
}

int32 MemEntryImpl::GetDataSize(int index) const {
  if (index < 0 || index >= NUM_STREAMS)
    return 0;
  return data_size_[index];
}

int MemEntryImpl::ReadData(int index, int offset, IOBuffer* buf, int buf_len,
                           const CompletionCallback& callback) {
  if (net_log_.IsLogging()) {
    net_log_.BeginEvent(
        net::NetLog::TYPE_ENTRY_READ_DATA,
        CreateNetLogReadWriteDataCallback(index, offset, buf_len, false));
  }

  int result = InternalReadData(index, offset, buf, buf_len);

  if (net_log_.IsLogging()) {
    net_log_.EndEvent(
        net::NetLog::TYPE_ENTRY_READ_DATA,
        CreateNetLogReadWriteCompleteCallback(result));
  }
  return result;
}

int MemEntryImpl::WriteData(int index, int offset, IOBuffer* buf, int buf_len,
                            const CompletionCallback& callback, bool truncate) {
  if (net_log_.IsLogging()) {
    net_log_.BeginEvent(
        net::NetLog::TYPE_ENTRY_WRITE_DATA,
        CreateNetLogReadWriteDataCallback(index, offset, buf_len, truncate));
  }

  int result = InternalWriteData(index, offset, buf, buf_len, truncate);

  if (net_log_.IsLogging()) {
    net_log_.EndEvent(
        net::NetLog::TYPE_ENTRY_WRITE_DATA,
        CreateNetLogReadWriteCompleteCallback(result));
  }
  return result;
}

int MemEntryImpl::ReadSparseData(int64 offset, IOBuffer* buf, int buf_len,
                                 const CompletionCallback& callback) {
  if (net_log_.IsLogging()) {
    net_log_.BeginEvent(
        net::NetLog::TYPE_SPARSE_READ,
        CreateNetLogSparseOperationCallback(offset, buf_len));
  }
  int result = InternalReadSparseData(offset, buf, buf_len);
  if (net_log_.IsLogging())
    net_log_.EndEvent(net::NetLog::TYPE_SPARSE_READ);
  return result;
}

int MemEntryImpl::WriteSparseData(int64 offset, IOBuffer* buf, int buf_len,
                                  const CompletionCallback& callback) {
  if (net_log_.IsLogging()) {
    net_log_.BeginEvent(
        net::NetLog::TYPE_SPARSE_WRITE,
        CreateNetLogSparseOperationCallback(offset, buf_len));
  }
  int result = InternalWriteSparseData(offset, buf, buf_len);
  if (net_log_.IsLogging())
    net_log_.EndEvent(net::NetLog::TYPE_SPARSE_WRITE);
  return result;
}

int MemEntryImpl::GetAvailableRange(int64 offset, int len, int64* start,
                                    const CompletionCallback& callback) {
  if (net_log_.IsLogging()) {
    net_log_.BeginEvent(
        net::NetLog::TYPE_SPARSE_GET_RANGE,
        CreateNetLogSparseOperationCallback(offset, len));
  }
  int result = GetAvailableRange(offset, len, start);
  if (net_log_.IsLogging()) {
    net_log_.EndEvent(
        net::NetLog::TYPE_SPARSE_GET_RANGE,
        CreateNetLogGetAvailableRangeResultCallback(*start, result));
  }
  return result;
}

bool MemEntryImpl::CouldBeSparse() const {
  DCHECK_EQ(kParentEntry, type());
  return (children_.get() != NULL);
}

int MemEntryImpl::ReadyForSparseIO(const CompletionCallback& callback) {
  return net::OK;
}

// ------------------------------------------------------------------------

MemEntryImpl::~MemEntryImpl() {
  for (int i = 0; i < NUM_STREAMS; i++)
    backend_->ModifyStorageSize(data_size_[i], 0);
  backend_->ModifyStorageSize(static_cast<int32>(key_.size()), 0);
  net_log_.EndEvent(net::NetLog::TYPE_DISK_CACHE_MEM_ENTRY_IMPL);
}

int MemEntryImpl::InternalReadData(int index, int offset, IOBuffer* buf,
                                   int buf_len) {
  DCHECK(type() == kParentEntry || index == kSparseData);

  if (index < 0 || index >= NUM_STREAMS)
    return net::ERR_INVALID_ARGUMENT;

  int entry_size = GetDataSize(index);
  if (offset >= entry_size || offset < 0 || !buf_len)
    return 0;

  if (buf_len < 0)
    return net::ERR_INVALID_ARGUMENT;

  if (offset + buf_len > entry_size)
    buf_len = entry_size - offset;

  UpdateRank(false);

  memcpy(buf->data(), &(data_[index])[offset], buf_len);
  return buf_len;
}

int MemEntryImpl::InternalWriteData(int index, int offset, IOBuffer* buf,
                                    int buf_len, bool truncate) {
  DCHECK(type() == kParentEntry || index == kSparseData);

  if (index < 0 || index >= NUM_STREAMS)
    return net::ERR_INVALID_ARGUMENT;

  if (offset < 0 || buf_len < 0)
    return net::ERR_INVALID_ARGUMENT;

  int max_file_size = backend_->MaxFileSize();

  // Check offset and buf_len individually as well, since offset + buf_len
  // could overflow.
  if (offset > max_file_size || buf_len > max_file_size ||
      offset + buf_len > max_file_size) {
    return net::ERR_FAILED;
  }

  // Read the size at this point.
  int entry_size = GetDataSize(index);

  PrepareTarget(index, offset, buf_len);

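  // Grow or shrink the reported size as needed. For example, writing 100
  // bytes at offset 50 into a 120-byte stream grows it to 150 bytes, while a
  // truncating write of 10 bytes at offset 0 shrinks it to 10 bytes.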
  if (entry_size < offset + buf_len) {
    backend_->ModifyStorageSize(entry_size, offset + buf_len);
    data_size_[index] = offset + buf_len;
  } else if (truncate) {
    if (entry_size > offset + buf_len) {
      backend_->ModifyStorageSize(entry_size, offset + buf_len);
      data_size_[index] = offset + buf_len;
    }
  }

  UpdateRank(true);

  if (!buf_len)
    return 0;

  memcpy(&(data_[index])[offset], buf->data(), buf_len);
  return buf_len;
}

int MemEntryImpl::InternalReadSparseData(int64 offset, IOBuffer* buf,
                                         int buf_len) {
  DCHECK(type() == kParentEntry);

  if (!InitSparseInfo())
    return net::ERR_CACHE_OPERATION_NOT_SUPPORTED;

  if (offset < 0 || buf_len < 0)
    return net::ERR_INVALID_ARGUMENT;

  // We will keep using this buffer and adjust the offset in this buffer.
  scoped_refptr<net::DrainableIOBuffer> io_buf(
      new net::DrainableIOBuffer(buf, buf_len));

  // Iterate until we have read enough.
  while (io_buf->BytesRemaining()) {
    MemEntryImpl* child = OpenChild(offset + io_buf->BytesConsumed(), false);

    // No child present for that offset.
    if (!child)
      break;

    // We then need to prepare the child offset and len.
    int child_offset = ToChildOffset(offset + io_buf->BytesConsumed());

    // If we are trying to read from a position for which the child entry has
    // no data, we should stop.
    if (child_offset < child->child_first_pos_)
      break;
    if (net_log_.IsLogging()) {
      net_log_.BeginEvent(
          net::NetLog::TYPE_SPARSE_READ_CHILD_DATA,
          CreateNetLogSparseReadWriteCallback(child->net_log().source(),
                                              io_buf->BytesRemaining()));
    }
    int ret = child->ReadData(kSparseData, child_offset, io_buf.get(),
                              io_buf->BytesRemaining(), CompletionCallback());
    if (net_log_.IsLogging()) {
      net_log_.EndEventWithNetErrorCode(
          net::NetLog::TYPE_SPARSE_READ_CHILD_DATA, ret);
    }

    // If we encounter an error in one entry, return immediately.
    if (ret < 0)
      return ret;
    else if (ret == 0)
      break;

    // Increment the counter by number of bytes read in the child entry.
    io_buf->DidConsume(ret);
  }

  UpdateRank(false);

  return io_buf->BytesConsumed();
}

int MemEntryImpl::InternalWriteSparseData(int64 offset, IOBuffer* buf,
                                          int buf_len) {
  DCHECK(type() == kParentEntry);

  if (!InitSparseInfo())
    return net::ERR_CACHE_OPERATION_NOT_SUPPORTED;

  if (offset < 0 || buf_len < 0)
    return net::ERR_INVALID_ARGUMENT;

  scoped_refptr<net::DrainableIOBuffer> io_buf(
      new net::DrainableIOBuffer(buf, buf_len));

  // This loop walks through child entries continuously starting from |offset|
  // and writes blocks of data (of maximum size kMaxSparseEntrySize) into each
  // child entry until all |buf_len| bytes are written. The write operation can
  // start in the middle of an entry.
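  // For example, with 4KB children, a write of 10000 bytes at offset 3000
  // touches three children: bytes [3000, 4096) of child 0, all of child 1,
  // and bytes [0, 4808) of child 2.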
  while (io_buf->BytesRemaining()) {
    MemEntryImpl* child = OpenChild(offset + io_buf->BytesConsumed(), true);
    int child_offset = ToChildOffset(offset + io_buf->BytesConsumed());

    // Find the right amount to write; this depends on the remaining bytes to
    // write and the remaining capacity of this child entry.
    int write_len = std::min(static_cast<int>(io_buf->BytesRemaining()),
                             kMaxSparseEntrySize - child_offset);

    // Keep a record of the last byte position (exclusive) in the child.
    int data_size = child->GetDataSize(kSparseData);

    if (net_log_.IsLogging()) {
      net_log_.BeginEvent(
          net::NetLog::TYPE_SPARSE_WRITE_CHILD_DATA,
          CreateNetLogSparseReadWriteCallback(child->net_log().source(),
                                              write_len));
    }

    // Always writes to the child entry. This operation may overwrite data
    // previously written.
    // TODO(hclam): if there is data in the entry and this write is not
    // continuous we may want to discard this write.
    int ret = child->WriteData(kSparseData, child_offset, io_buf.get(),
                               write_len, CompletionCallback(), true);
    if (net_log_.IsLogging()) {
      net_log_.EndEventWithNetErrorCode(
          net::NetLog::TYPE_SPARSE_WRITE_CHILD_DATA, ret);
    }
    if (ret < 0)
      return ret;
    else if (ret == 0)
      break;

    // Keep a record of the first byte position in the child if the write was
    // not aligned nor continuous. This is to enable writing to the middle
    // of an entry and still keep track of data off the aligned edge.
    if (data_size != child_offset)
      child->child_first_pos_ = child_offset;

    // Adjust the offset in the IO buffer.
    io_buf->DidConsume(ret);
  }

  UpdateRank(true);

  return io_buf->BytesConsumed();
}

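// A worked example of the scan below: if only bytes [4096, 8192) of the entry
// have been written, GetAvailableRange(0, 10000, &start) skips the empty
// first child, sets |start| to 4096, and returns 4096 (the length of the
// continuous range found).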
int MemEntryImpl::GetAvailableRange(int64 offset, int len, int64* start) {
  DCHECK(type() == kParentEntry);
  DCHECK(start);

  if (!InitSparseInfo())
    return net::ERR_CACHE_OPERATION_NOT_SUPPORTED;

  if (offset < 0 || len < 0 || !start)
    return net::ERR_INVALID_ARGUMENT;

  MemEntryImpl* current_child = NULL;

  // Find the first child and record the number of empty bytes.
  int empty = FindNextChild(offset, len, &current_child);
  if (current_child) {
    *start = offset + empty;
    len -= empty;

    // Counts the number of continuous bytes.
    int continuous = 0;

    // This loop scans for continuous bytes.
    while (len && current_child) {
      // Number of bytes available in this child.
      int data_size = current_child->GetDataSize(kSparseData) -
                      ToChildOffset(*start + continuous);
      if (data_size > len)
        data_size = len;

      // We have found more continuous bytes so increment the count. Also
      // decrement the length we should scan.
      continuous += data_size;
      len -= data_size;

      // If the next child is discontinuous, break the loop.
      if (FindNextChild(*start + continuous, len, &current_child))
        break;
    }
    return continuous;
  }
  *start = offset;
  return 0;
}

void MemEntryImpl::PrepareTarget(int index, int offset, int buf_len) {
  int entry_size = GetDataSize(index);

  if (entry_size >= offset + buf_len)
    return;  // Not growing the stored data.

  if (static_cast<int>(data_[index].size()) < offset + buf_len)
    data_[index].resize(offset + buf_len);

  if (offset <= entry_size)
    return;  // There is no "hole" in the stored data.

  // Clean up the hole not written by the user. The point is to avoid
  // returning random stuff later on.
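  // For example, if the stream holds 5 bytes and the caller writes at offset
  // 10, bytes [5, 10) are zero-filled before the new data is copied in.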
  memset(&(data_[index])[entry_size], 0, offset - entry_size);
}

void MemEntryImpl::UpdateRank(bool modified) {
  Time current = Time::Now();
  last_used_ = current;

  if (modified)
    last_modified_ = current;

  if (!doomed_)
    backend_->UpdateRank(this);
}

bool MemEntryImpl::InitSparseInfo() {
  DCHECK(type() == kParentEntry);

  if (!children_.get()) {
    // If we already have some data in the sparse stream but we are being
    // initialized as a sparse entry, we should fail.
    if (GetDataSize(kSparseData))
      return false;
    children_.reset(new EntryMap());

    // The parent entry stores data for the first block, so save this object to
    // index 0.
    (*children_)[0] = this;
  }
  return true;
}

bool MemEntryImpl::InitChildEntry(MemEntryImpl* parent, int child_id,
                                  net::NetLog* net_log) {
  DCHECK(!parent_);
  DCHECK(!child_id_);

  net_log_ = net::BoundNetLog::Make(net_log,
                                    net::NetLog::SOURCE_MEMORY_CACHE_ENTRY);
  net_log_.BeginEvent(
      net::NetLog::TYPE_DISK_CACHE_MEM_ENTRY_IMPL,
      base::Bind(&NetLogChildEntryCreationCallback, parent, child_id));

  parent_ = parent;
  child_id_ = child_id;
  Time current = Time::Now();
  last_modified_ = current;
  last_used_ = current;
  // Insert this entry into the backend's ranking list.
  backend_->InsertIntoRankingList(this);
  return true;
}

MemEntryImpl* MemEntryImpl::OpenChild(int64 offset, bool create) {
  DCHECK(type() == kParentEntry);
  int index = ToChildIndex(offset);
  EntryMap::iterator i = children_->find(index);
  if (i != children_->end()) {
    return i->second;
  } else if (create) {
    MemEntryImpl* child = new MemEntryImpl(backend_);
    child->InitChildEntry(this, index, net_log_.net_log());
    (*children_)[index] = child;
    return child;
  }
  return NULL;
}

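// A worked example of the scan below, assuming 4KB children: if child 0 holds
// no sparse data and child 1 holds bytes [0, 100), then
// FindNextChild(0, 10000, &child) sets |child| to child 1 and returns 4096,
// the number of bytes scanned before reaching it.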
int MemEntryImpl::FindNextChild(int64 offset, int len, MemEntryImpl** child) {
  DCHECK(child);
  *child = NULL;
  int scanned_len = 0;

  // This loop tries to find the first existing child.
  while (scanned_len < len) {
    // This points to the current offset in the child.
    int current_child_offset = ToChildOffset(offset + scanned_len);
    MemEntryImpl* current_child = OpenChild(offset + scanned_len, false);
    if (current_child) {
      int child_first_pos = current_child->child_first_pos_;

      // This points to the first byte that we should be reading from; we need
      // to take care of the filled region and the current offset in the child.
      int first_pos = std::max(current_child_offset, child_first_pos);

      // If the first byte position we should read from doesn't exceed the
      // filled region, we have found the first child.
      if (first_pos < current_child->GetDataSize(kSparseData)) {
        *child = current_child;

        // We need to advance the scanned length.
        scanned_len += first_pos - current_child_offset;
        break;
      }
    }
    scanned_len += kMaxSparseEntrySize - current_child_offset;
  }
  return scanned_len;
}

void MemEntryImpl::DetachChild(int child_id) {
  children_->erase(child_id);
}

}  // namespace disk_cache