Chromium Code Reviews

Unified Diff: net/disk_cache/mem_entry_impl.cc

Issue 6263010: More net/ header/implementation method reordering. (Closed) Base URL: svn://svn.chromium.org/chrome/trunk/src
Patch Set: Created 9 years, 11 months ago
// Copyright (c) 2006-2010 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#include "net/disk_cache/mem_entry_impl.h"

#include "base/logging.h"
#include "net/base/io_buffer.h"
#include "net/base/net_errors.h"
#include "net/disk_cache/mem_backend_impl.h"
(...skipping 30 matching lines...)
  ref_count_ = 0;
  parent_ = NULL;
  child_id_ = 0;
  child_first_pos_ = 0;
  next_ = NULL;
  prev_ = NULL;
  for (int i = 0; i < NUM_STREAMS; i++)
    data_size_[i] = 0;
}

-MemEntryImpl::~MemEntryImpl() {
-  for (int i = 0; i < NUM_STREAMS; i++)
-    backend_->ModifyStorageSize(data_size_[i], 0);
-  backend_->ModifyStorageSize(static_cast<int32>(key_.size()), 0);
+// ------------------------------------------------------------------------
+
+bool MemEntryImpl::CreateEntry(const std::string& key) {
+  key_ = key;
+  Time current = Time::Now();
+  last_modified_ = current;
+  last_used_ = current;
+  Open();
+  backend_->ModifyStorageSize(0, static_cast<int32>(key.size()));
+  return true;
}

+void MemEntryImpl::InternalDoom() {
+  doomed_ = true;
+  if (!ref_count_) {
+    if (type() == kParentEntry) {
+      // If this is a parent entry, we need to doom all the child entries.
+      if (children_.get()) {
+        EntryMap children;
+        children.swap(*children_);
+        for (EntryMap::iterator i = children.begin();
+             i != children.end(); ++i) {
+          // Since a pointer to this object is also saved in the map, avoid
+          // dooming it.
+          if (i->second != this)
+            i->second->Doom();
+        }
+        DCHECK(children_->size() == 0);
+      }
+    } else {
+      // If this is a child entry, detach it from the parent.
+      parent_->DetachChild(child_id_);
+    }
+    delete this;
+  }
+}
+
+void MemEntryImpl::Open() {
+  // Only a parent entry can be opened.
+  // TODO(hclam): make sure it's correct to not apply the concept of ref
+  // counting to child entry.
+  DCHECK(type() == kParentEntry);
+  ref_count_++;
+  DCHECK(ref_count_ >= 0);
+  DCHECK(!doomed_);
+}
+
+bool MemEntryImpl::InUse() {
+  if (type() == kParentEntry) {
+    return ref_count_ > 0;
+  } else {
+    // A child entry is always not in use. The consequence is that a child entry
+    // can always be evicted while the associated parent entry is currently in
+    // used (i.e. opened).
+    return false;
+  }
+}
+
+// ------------------------------------------------------------------------
+
void MemEntryImpl::Doom() {
  if (doomed_)
    return;
  if (type() == kParentEntry) {
    // Perform internal doom from the backend if this is a parent entry.
    backend_->InternalDoomEntry(this);
  } else {
    // Manually detach from the backend and perform internal doom.
    backend_->RemoveFromRankingList(this);
    InternalDoom();
(...skipping 189 matching lines...)

    // Adjust the offset in the IO buffer.
    io_buf->DidConsume(ret);
  }

  UpdateRank(true);

  return io_buf->BytesConsumed();
}

+int MemEntryImpl::GetAvailableRange(int64 offset, int len, int64* start,
+                                    CompletionCallback* callback) {
+  return GetAvailableRange(offset, len, start);
+}
+
+bool MemEntryImpl::CouldBeSparse() const {
+  DCHECK_EQ(kParentEntry, type());
+  return (children_.get() != NULL);
+}
+
+int MemEntryImpl::ReadyForSparseIO(
+    net::CompletionCallback* completion_callback) {
+  return net::OK;
+}
+
+// ------------------------------------------------------------------------
+
+MemEntryImpl::~MemEntryImpl() {
+  for (int i = 0; i < NUM_STREAMS; i++)
+    backend_->ModifyStorageSize(data_size_[i], 0);
+  backend_->ModifyStorageSize(static_cast<int32>(key_.size()), 0);
+}
+
int MemEntryImpl::GetAvailableRange(int64 offset, int len, int64* start) {
  DCHECK(type() == kParentEntry);
  DCHECK(start);

  if (!InitSparseInfo())
    return net::ERR_CACHE_OPERATION_NOT_SUPPORTED;

  if (offset < 0 || len < 0 || !start)
    return net::ERR_INVALID_ARGUMENT;

(...skipping 24 matching lines...)
      // If the next child is discontinuous, break the loop.
      if (FindNextChild(*start + continuous, len, &current_child))
        break;
    }
    return continuous;
  }
  *start = offset;
  return 0;
}

-int MemEntryImpl::GetAvailableRange(int64 offset, int len, int64* start,
-                                    CompletionCallback* callback) {
-  return GetAvailableRange(offset, len, start);
-}
-
-bool MemEntryImpl::CouldBeSparse() const {
-  DCHECK_EQ(kParentEntry, type());
-  return (children_.get() != NULL);
-}
-
-int MemEntryImpl::ReadyForSparseIO(
-    net::CompletionCallback* completion_callback) {
-  return net::OK;
-}
-
-// ------------------------------------------------------------------------
-
-bool MemEntryImpl::CreateEntry(const std::string& key) {
-  key_ = key;
-  Time current = Time::Now();
-  last_modified_ = current;
-  last_used_ = current;
-  Open();
-  backend_->ModifyStorageSize(0, static_cast<int32>(key.size()));
-  return true;
-}
-
-void MemEntryImpl::InternalDoom() {
-  doomed_ = true;
-  if (!ref_count_) {
-    if (type() == kParentEntry) {
-      // If this is a parent entry, we need to doom all the child entries.
-      if (children_.get()) {
-        EntryMap children;
-        children.swap(*children_);
-        for (EntryMap::iterator i = children.begin();
-             i != children.end(); ++i) {
-          // Since a pointer to this object is also saved in the map, avoid
-          // dooming it.
-          if (i->second != this)
-            i->second->Doom();
-        }
-        DCHECK(children_->size() == 0);
-      }
-    } else {
-      // If this is a child entry, detach it from the parent.
-      parent_->DetachChild(child_id_);
-    }
-    delete this;
-  }
-}
-
-void MemEntryImpl::Open() {
-  // Only a parent entry can be opened.
-  // TODO(hclam): make sure it's correct to not apply the concept of ref
-  // counting to child entry.
-  DCHECK(type() == kParentEntry);
-  ref_count_++;
-  DCHECK(ref_count_ >= 0);
-  DCHECK(!doomed_);
-}
-
-bool MemEntryImpl::InUse() {
-  if (type() == kParentEntry) {
-    return ref_count_ > 0;
-  } else {
-    // A child entry is always not in use. The consequence is that a child entry
-    // can always be evicted while the associated parent entry is currently in
-    // used (i.e. opened).
-    return false;
-  }
-}
-
-// ------------------------------------------------------------------------
-
void MemEntryImpl::PrepareTarget(int index, int offset, int buf_len) {
  int entry_size = GetDataSize(index);

  if (entry_size >= offset + buf_len)
    return;  // Not growing the stored data.

  if (static_cast<int>(data_[index].size()) < offset + buf_len)
    data_[index].resize(offset + buf_len);

  if (offset <= entry_size)
(...skipping 90 matching lines...)
    scanned_len += kMaxSparseEntrySize - current_child_offset;
  }
  return scanned_len;
}

void MemEntryImpl::DetachChild(int child_id) {
  children_->erase(child_id);
}

}  // namespace disk_cache
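
Note: within the hunks shown above, no method body changes; definitions in mem_entry_impl.cc are only regrouped (with // ---- separator comments), presumably so that the definition order in the .cc file lines up with the declaration order in mem_entry_impl.h, which is what the issue title ("header/implementation method reordering") describes. A minimal sketch of that convention, using a hypothetical widget.h / widget.cc pair (none of these names come from this CL):

#include <string>

// widget.h (hypothetical): the declaration order is the reference order.
class Widget {
 public:
  Widget();
  ~Widget();

  bool Create(const std::string& key);
  void Doom();

 private:
  // Internal helpers, grouped after the public interface.
  void Open();
  bool InUse() const;

  std::string key_;
  int ref_count_;
};

// widget.cc (hypothetical): definitions appear in the same order as the
// declarations above, optionally split into the same groups by separators.
Widget::Widget() : ref_count_(0) {}
Widget::~Widget() {}

// ------------------------------------------------------------------------

bool Widget::Create(const std::string& key) {
  key_ = key;
  Open();
  return true;
}

void Widget::Doom() {
  // Resource release omitted in this sketch.
}

// ------------------------------------------------------------------------

void Widget::Open() {
  ref_count_++;
}

bool Widget::InUse() const {
  return ref_count_ > 0;
}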
