// Copyright (c) 2012 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#ifndef NET_DISK_CACHE_MEMORY_MEM_ENTRY_IMPL_H_
#define NET_DISK_CACHE_MEMORY_MEM_ENTRY_IMPL_H_

#include "base/containers/hash_tables.h"
#include "base/gtest_prod_util.h"
#include "base/memory/scoped_ptr.h"
#include "net/base/net_log.h"
#include "net/disk_cache/disk_cache.h"

namespace disk_cache {

class MemBackendImpl;

// This class implements the Entry interface for the memory-only cache. An
// object of this class represents a single entry in the cache. We use two
// types of entries, parent and child, to support sparse caching.
//
// A parent entry is non-sparse until a sparse method is invoked (i.e.
// ReadSparseData, WriteSparseData, GetAvailableRange), at which point its
// sparse information is initialized. It then manages a list of child entries
// and delegates the sparse API calls to them, creating and deleting child
// entries and updating the list as needed.
//
// A child entry carries partial cache content; non-sparse methods like
// ReadData and WriteData cannot be applied to it. The lifetime of a child
// entry is managed by the parent entry that created it, except that the child
// can be evicted independently. A child entry does not have a key and is not
// registered in the backend's entry map. It is registered in the backend's
// ranking list so that partial content can be evicted.
//
// A sparse entry has a fixed maximum size and can be partially filled. There
// can only be one continuous filled region in a sparse entry, as illustrated
// by the following example:
// | xxx ooooo |
// x = unfilled region
// o = filled region
// It is guaranteed that there is at most one unfilled region and one filled
// region, and the unfilled region (if there is one) always precedes the
// filled region. The bookkeeping for the filled region in a sparse entry is
// done using the variable |child_first_pos_| (inclusive).
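//
// For example (an illustrative sketch only; the concrete numbers are
// assumptions, not taken from the implementation): if a child entry can hold
// bytes [0, 1024) and only bytes [256, 512) have been written so far, then
// the unfilled region is [0, 256), the filled region is [256, 512), and
// |child_first_pos_| is 256.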

class MemEntryImpl : public Entry {
 public:
  enum EntryType {
    kParentEntry,
    kChildEntry,
  };

  explicit MemEntryImpl(MemBackendImpl* backend);

  // Performs the initialization of a MemEntryImpl that will be added to the
  // cache.
  bool CreateEntry(const std::string& key, net::NetLog* net_log);

  // Permanently destroys this entry.
  void InternalDoom();

  void Open();
  bool InUse();

  MemEntryImpl* next() const {
    return next_;
  }

  MemEntryImpl* prev() const {
    return prev_;
  }

  void set_next(MemEntryImpl* next) {
    next_ = next;
  }

  void set_prev(MemEntryImpl* prev) {
    prev_ = prev;
  }

  EntryType type() const {
    return parent_ ? kChildEntry : kParentEntry;
  }

  const net::BoundNetLog& net_log() {
    return net_log_;
  }

  // Entry interface.
  void Doom() override;
  void Close() override;
  std::string GetKey() const override;
  base::Time GetLastUsed() const override;
  base::Time GetLastModified() const override;
  int32 GetDataSize(int index) const override;
  int ReadData(int index,
               int offset,
               IOBuffer* buf,
               int buf_len,
               const CompletionCallback& callback) override;
  int WriteData(int index,
                int offset,
                IOBuffer* buf,
                int buf_len,
                const CompletionCallback& callback,
                bool truncate) override;
  int ReadSparseData(int64 offset,
                     IOBuffer* buf,
                     int buf_len,
                     const CompletionCallback& callback) override;
  int WriteSparseData(int64 offset,
                      IOBuffer* buf,
                      int buf_len,
                      const CompletionCallback& callback) override;
  int GetAvailableRange(int64 offset,
                        int len,
                        int64* start,
                        const CompletionCallback& callback) override;
  bool CouldBeSparse() const override;
  void CancelSparseIO() override {}
  int ReadyForSparseIO(const CompletionCallback& callback) override;
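
  // Example use of the sparse interface above (a minimal sketch; |entry| and
  // |callback| are hypothetical caller-side names, and each call may also
  // return a net error code such as net::ERR_IO_PENDING that the caller must
  // handle):
  //
  //   scoped_refptr<net::IOBuffer> buf(new net::IOBuffer(1024));
  //   // Write 1024 bytes starting at byte offset 4096 of the sparse entry.
  //   int rv = entry->WriteSparseData(4096, buf.get(), 1024, callback);
  //   // Ask how much contiguous data is available at or after offset 4096.
  //   int64 start = 0;
  //   int available = entry->GetAvailableRange(4096, 1024, &start, callback);
  //   // Read the available bytes back.
  //   rv = entry->ReadSparseData(start, buf.get(), available, callback);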

 private:
  typedef base::hash_map<int, MemEntryImpl*> EntryMap;

  enum {
    NUM_STREAMS = 3
  };

  ~MemEntryImpl() override;

  // Do all the work for the corresponding public functions. Implemented as
  // separate functions to make logging of results simpler.
  int InternalReadData(int index, int offset, IOBuffer* buf, int buf_len);
  int InternalWriteData(int index, int offset, IOBuffer* buf, int buf_len,
                        bool truncate);
  int InternalReadSparseData(int64 offset, IOBuffer* buf, int buf_len);
  int InternalWriteSparseData(int64 offset, IOBuffer* buf, int buf_len);

  // Old Entry interface.
  int GetAvailableRange(int64 offset, int len, int64* start);

  // Grows and cleans up the data buffer.
  void PrepareTarget(int index, int offset, int buf_len);

  // Updates ranking information.
  void UpdateRank(bool modified);

  // Initializes the children map and sparse info. This method is only called
  // on a parent entry.
  bool InitSparseInfo();

  // Performs the initialization of a MemEntryImpl as a child entry.
  // |parent| is the pointer to the parent entry. |child_id| is the ID of
  // the new child.
  bool InitChildEntry(MemEntryImpl* parent, int child_id, net::NetLog* net_log);

  // Returns the entry responsible for |offset|. The returned entry can be a
  // child entry or this entry itself if |offset| points to the first range.
  // If no such entry exists and |create| is true, a new child entry is
  // created.
  MemEntryImpl* OpenChild(int64 offset, bool create);
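  // (Example for OpenChild(), purely illustrative; the fixed per-child range
  // size is an assumption and its actual value lives in the implementation:
  // if each range covers S bytes, an |offset| in [0, S) is served by this
  // entry itself, while an |offset| in [S, 2 * S) is served by a child entry.)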

  // Finds the first child located within the byte range
  // [|offset|, |offset| + |len|). Returns the number of bytes from |offset| to
  // the first available byte in that child, and outputs the child to |child|.
  int FindNextChild(int64 offset, int len, MemEntryImpl** child);
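  // (Example with illustrative numbers only: if |offset| is 100, |len| is 400,
  // and the first available byte within that range lives at offset 250 in some
  // child, FindNextChild() returns 150 and sets |child| to that child entry.)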

  // Removes child indexed by |child_id| from the children map.
  void DetachChild(int child_id);

  std::string key_;
  std::vector<char> data_[NUM_STREAMS];  // User data.
  int32 data_size_[NUM_STREAMS];
  int ref_count_;

  int child_id_;              // The ID of a child entry.
  int child_first_pos_;       // The position of the first byte in a child
                              // entry.
  MemEntryImpl* next_;        // Pointers for the LRU list.
  MemEntryImpl* prev_;
  MemEntryImpl* parent_;      // Pointer to the parent entry.
  scoped_ptr<EntryMap> children_;

  base::Time last_modified_;  // LRU information.
  base::Time last_used_;
  MemBackendImpl* backend_;   // Back pointer to the cache.
  bool doomed_;               // True if this entry was removed from the cache.

  net::BoundNetLog net_log_;

  DISALLOW_COPY_AND_ASSIGN(MemEntryImpl);
};

}  // namespace disk_cache

#endif  // NET_DISK_CACHE_MEMORY_MEM_ENTRY_IMPL_H_