// Copyright (c) 2012 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#ifndef NET_DISK_CACHE_MEM_ENTRY_IMPL_H_
#define NET_DISK_CACHE_MEM_ENTRY_IMPL_H_

#include "base/containers/hash_tables.h"
#include "base/gtest_prod_util.h"
#include "base/memory/scoped_ptr.h"
#include "net/base/net_log.h"
#include "net/disk_cache/disk_cache.h"

namespace disk_cache {

class MemBackendImpl;

// This class implements the Entry interface for the memory-only cache. An
// object of this class represents a single entry on the cache. We use two
// types of entries, parent and child, to support sparse caching.
//
// A parent entry is non-sparse until a sparse method is invoked (i.e.
// ReadSparseData, WriteSparseData, GetAvailableRange), at which point sparse
// information is initialized. It then manages a list of child entries and
// delegates the sparse API calls to the child entries. It creates and deletes
// child entries and updates the list when needed.
//
// A child entry is used to carry partial cache content; non-sparse methods
// like ReadData and WriteData cannot be applied to it. The lifetime of a
// child entry is managed by the parent entry that created it, except that the
// entry can be evicted independently. A child entry does not have a key and
// it is not registered in the backend's entry map. It is registered in the
// backend's ranking list to enable eviction of partial content.
//
// A sparse entry has a fixed maximum size and can be partially filled. There
// can only be one continuous filled region in a sparse entry, as illustrated
// by the following example:
// | xxx ooooo |
// x = unfilled region
// o = filled region
// It is guaranteed that there is at most one unfilled region and one filled
// region, and the unfilled region (if there is one) always comes before the
// filled region. The bookkeeping for the filled region in a sparse entry is
// done using the variable |child_first_pos_| (inclusive).
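//
// An illustrative sketch of that bookkeeping (assumed layout, not normative;
// the maximum child size is an implementation detail of the .cc file).
// Suppose each child covers at most S bytes, so parent byte offset O falls
// into the child with index O / S. If that child only has its tail filled:
//   child data range : [0, S)
//   filled bytes     : [100, S)    <- the single filled region
//   child_first_pos_ : 100         <- first filled byte, inclusive
// then a GetAvailableRange() query starting in the unfilled prefix of this
// child would report available data beginning at (O / S) * S + 100.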

class MemEntryImpl : public Entry {
 public:
  enum EntryType {
    kParentEntry,
    kChildEntry,
  };

  explicit MemEntryImpl(MemBackendImpl* backend);

  // Performs the initialization of a MemEntryImpl that will be added to the
  // cache.
  bool CreateEntry(const std::string& key, net::NetLog* net_log);

  // Permanently destroys this entry.
  void InternalDoom();

  void Open();
  bool InUse();

  MemEntryImpl* next() const {
    return next_;
  }

  MemEntryImpl* prev() const {
    return prev_;
  }

  void set_next(MemEntryImpl* next) {
    next_ = next;
  }

  void set_prev(MemEntryImpl* prev) {
    prev_ = prev;
  }

  EntryType type() const {
    return parent_ ? kChildEntry : kParentEntry;
  }

  const net::BoundNetLog& net_log() {
    return net_log_;
  }

  // Entry interface.
  virtual void Doom() OVERRIDE;
  virtual void Close() OVERRIDE;
  virtual std::string GetKey() const OVERRIDE;
  virtual base::Time GetLastUsed() const OVERRIDE;
  virtual base::Time GetLastModified() const OVERRIDE;
  virtual int32 GetDataSize(int index) const OVERRIDE;
  virtual int ReadData(int index, int offset, IOBuffer* buf, int buf_len,
                       const CompletionCallback& callback) OVERRIDE;
  virtual int WriteData(int index, int offset, IOBuffer* buf, int buf_len,
                        const CompletionCallback& callback,
                        bool truncate) OVERRIDE;
  virtual int ReadSparseData(int64 offset, IOBuffer* buf, int buf_len,
                             const CompletionCallback& callback) OVERRIDE;
  virtual int WriteSparseData(int64 offset, IOBuffer* buf, int buf_len,
                              const CompletionCallback& callback) OVERRIDE;
  virtual int GetAvailableRange(int64 offset, int len, int64* start,
                                const CompletionCallback& callback) OVERRIDE;
  virtual bool CouldBeSparse() const OVERRIDE;
  virtual void CancelSparseIO() OVERRIDE {}
  virtual int ReadyForSparseIO(const CompletionCallback& callback) OVERRIDE;
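
  // Caller-side sketch of the sparse subset above (illustrative only;
  // |entry|, |buf| and |callback| are hypothetical names assumed to be set
  // up by the caller, e.g. an entry obtained from the in-memory backend):
  //
  //   scoped_refptr<net::IOBuffer> buf(new net::IOBuffer(1024));
  //   int rv = entry->WriteSparseData(5 * 1024, buf.get(), 1024, callback);
  //   int64 start = 0;
  //   rv = entry->GetAvailableRange(0, 10 * 1024, &start, callback);
  //   // On success, |start| holds the offset of the first byte of cached
  //   // data within the queried range.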

 private:
  typedef base::hash_map<int, MemEntryImpl*> EntryMap;

  enum {
    NUM_STREAMS = 3
  };

  virtual ~MemEntryImpl();

  // Do all the work for the corresponding public functions. Implemented as
  // separate functions to make logging of results simpler.
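  //
  // A sketch of the pattern (illustrative; not the exact mem_entry_impl.cc
  // code):
  //
  //   int MemEntryImpl::ReadData(int index, int offset, IOBuffer* buf,
  //                              int buf_len, const CompletionCallback&) {
  //     // Log the request, run the synchronous implementation below, then
  //     // log and return the result; the in-memory cache completes I/O
  //     // synchronously, so no callback needs to be posted.
  //     return InternalReadData(index, offset, buf, buf_len);
  //   }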
  int InternalReadData(int index, int offset, IOBuffer* buf, int buf_len);
  int InternalWriteData(int index, int offset, IOBuffer* buf, int buf_len,
                        bool truncate);
  int InternalReadSparseData(int64 offset, IOBuffer* buf, int buf_len);
  int InternalWriteSparseData(int64 offset, IOBuffer* buf, int buf_len);

  // Old Entry interface.
  int GetAvailableRange(int64 offset, int len, int64* start);

  // Grows and cleans up the data buffer.
  void PrepareTarget(int index, int offset, int buf_len);

  // Updates ranking information.
  void UpdateRank(bool modified);

  // Initializes the children map and sparse info. This method is only called
  // on a parent entry.
  bool InitSparseInfo();

  // Performs the initialization of a MemEntryImpl as a child entry.
  // |parent| is the pointer to the parent entry. |child_id| is the ID of
  // the new child.
  bool InitChildEntry(MemEntryImpl* parent, int child_id, net::NetLog* net_log);

  // Returns an entry responsible for |offset|. The returned entry can be a
  // child entry or this entry itself if |offset| points to the first range.
  // If such an entry does not exist and |create| is true, a new child entry
  // is created.
  MemEntryImpl* OpenChild(int64 offset, bool create);

  // Finds the first child located within the range [|offset|, |offset + len|).
  // Returns the number of bytes ahead of |offset| to reach the first available
  // bytes in the entry. The first child found is output to |child|.
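  //
  // For example (hypothetical numbers): with |offset| == 1000 and |len| ==
  // 4096, if the first cached byte in that window lives at parent offset 1300
  // inside some child, the call returns 300 and sets |child| to that entry; a
  // return value of 0 means data is available starting at |offset| itself.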
  int FindNextChild(int64 offset, int len, MemEntryImpl** child);

  // Removes child indexed by |child_id| from the children map.
  void DetachChild(int child_id);

  std::string key_;
  std::vector<char> data_[NUM_STREAMS];  // User data.
  int32 data_size_[NUM_STREAMS];
  int ref_count_;

  int child_id_;              // The ID of a child entry.
  int child_first_pos_;       // The position of the first byte in a child
                              // entry.
  MemEntryImpl* next_;        // Pointers for the LRU list.
  MemEntryImpl* prev_;
  MemEntryImpl* parent_;      // Pointer to the parent entry.
  scoped_ptr<EntryMap> children_;

  base::Time last_modified_;  // LRU information.
  base::Time last_used_;
  MemBackendImpl* backend_;   // Back pointer to the cache.
  bool doomed_;               // True if this entry was removed from the cache.

  net::BoundNetLog net_log_;

  DISALLOW_COPY_AND_ASSIGN(MemEntryImpl);
};

}  // namespace disk_cache

#endif  // NET_DISK_CACHE_MEM_ENTRY_IMPL_H_