Chromium Code Reviews
chromiumcodereview-hr@appspot.gserviceaccount.com (chromiumcodereview-hr) | Please choose your nickname with Settings | Help | Chromium Project | Gerrit Changes | Sign out
(313)

Side by Side Diff: third_party/tcmalloc/chromium/src/page_heap.h

Issue 1076002: Revert 41938 - Merged third_party/tcmalloc/vendor/src(googleperftools r87) in... (Closed) Base URL: svn://svn.chromium.org/chrome/trunk/src/
Patch Set: Created 10 years, 9 months ago
Use n/p to move between diff chunks; N/P to move between comments. Draft comments are only viewable by you.
Jump to:
View unified diff | Download patch | Annotate | Revision Log
OLDNEW
1 // Copyright (c) 2008, Google Inc. 1 // Copyright (c) 2008, Google Inc.
2 // All rights reserved. 2 // All rights reserved.
3 // 3 //
4 // Redistribution and use in source and binary forms, with or without 4 // Redistribution and use in source and binary forms, with or without
5 // modification, are permitted provided that the following conditions are 5 // modification, are permitted provided that the following conditions are
6 // met: 6 // met:
7 // 7 //
8 // * Redistributions of source code must retain the above copyright 8 // * Redistributions of source code must retain the above copyright
9 // notice, this list of conditions and the following disclaimer. 9 // notice, this list of conditions and the following disclaimer.
10 // * Redistributions in binary form must reproduce the above 10 // * Redistributions in binary form must reproduce the above
(...skipping 16 matching lines...) Expand all
27 // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE 27 // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
28 // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 28 // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
29 29
30 // --- 30 // ---
31 // Author: Sanjay Ghemawat <opensource@google.com> 31 // Author: Sanjay Ghemawat <opensource@google.com>
32 32
33 #ifndef TCMALLOC_PAGE_HEAP_H_ 33 #ifndef TCMALLOC_PAGE_HEAP_H_
34 #define TCMALLOC_PAGE_HEAP_H_ 34 #define TCMALLOC_PAGE_HEAP_H_
35 35
36 #include <config.h> 36 #include <config.h>
37 #include <google/malloc_extension.h>
38 #include "common.h" 37 #include "common.h"
39 #include "packed-cache-inl.h" 38 #include "packed-cache-inl.h"
40 #include "pagemap.h" 39 #include "pagemap.h"
41 #include "span.h" 40 #include "span.h"
42 41
43 // We need to dllexport PageHeap just for the unittest. MSVC complains
44 // that we don't dllexport the PageHeap members, but we don't need to
45 // test those, so I just suppress this warning.
46 #ifdef _MSC_VER
47 #pragma warning(push)
48 #pragma warning(disable:4251)
49 #endif
50
51 // This #ifdef should almost never be set. Set NO_TCMALLOC_SAMPLES if 42 // This #ifdef should almost never be set. Set NO_TCMALLOC_SAMPLES if
52 // you're porting to a system where you really can't get a stacktrace. 43 // you're porting to a system where you really can't get a stacktrace.
53 #ifdef NO_TCMALLOC_SAMPLES 44 #ifdef NO_TCMALLOC_SAMPLES
54 // We use #define so code compiles even if you #include stacktrace.h somehow. 45 // We use #define so code compiles even if you #include stacktrace.h somehow.
55 # define GetStackTrace(stack, depth, skip) (0) 46 # define GetStackTrace(stack, depth, skip) (0)
56 #else 47 #else
57 # include <google/stacktrace.h> 48 # include <google/stacktrace.h>
58 #endif 49 #endif
59 50
60 namespace tcmalloc { 51 namespace tcmalloc {
61 52
62 // ------------------------------------------------------------------------- 53 // -------------------------------------------------------------------------
63 // Map from page-id to per-page data 54 // Map from page-id to per-page data
64 // ------------------------------------------------------------------------- 55 // -------------------------------------------------------------------------
65 56
66 // We use PageMap2<> for 32-bit and PageMap3<> for 64-bit machines. 57 // We use PageMap2<> for 32-bit and PageMap3<> for 64-bit machines.
67 // ...except... 58 // ...except...
68 // On Windows, we use TCMalloc_PageMap1_LazyCommit<> for 32-bit machines. 59 // On Windows, we use TCMalloc_PageMap1_LazyCommit<> for 32-bit machines.
69 // We also use a simple one-level cache for hot PageID-to-sizeclass mappings, 60 // We also use a simple one-level cache for hot PageID-to-sizeclass mappings,
70 // because sometimes the sizeclass is all the information we need. 61 // because sometimes the sizeclass is all the information we need.
71 62
72 // Selector class -- general selector uses 3-level map 63 // Selector class -- general selector uses 3-level map
73 template <int BITS> class MapSelector { 64 template <int BITS> class MapSelector {
74 public: 65 public:
75 typedef TCMalloc_PageMap3<BITS-kPageShift> Type; 66 typedef TCMalloc_PageMap3<BITS-kPageShift> Type;
76 typedef PackedCache<BITS-kPageShift, uint64_t> CacheType; 67 typedef PackedCache<BITS-kPageShift, uint64_t> CacheType;
77 }; 68 };
78 69
79 // A two-level map for 32-bit machines
80 template <> class MapSelector<32> { 70 template <> class MapSelector<32> {
81 public: 71 public:
82 #ifdef WIN32 72 #ifdef WIN32
83 // A flat map for 32-bit machines (with lazy commit of memory). 73 // A flat map for 32-bit machines (with lazy commit of memory).
84 typedef TCMalloc_PageMap1_LazyCommit<32-kPageShift> Type; 74 typedef TCMalloc_PageMap1_LazyCommit<32-kPageShift> Type;
85 #else 75 #else
86 // A two-level map for 32-bit machines 76 // A two-level map for 32-bit machines
87 typedef TCMalloc_PageMap2<32-kPageShift> Type; 77 typedef TCMalloc_PageMap2<32-kPageShift> Type;
88 #endif 78 #endif
89 typedef PackedCache<32-kPageShift, uint16_t> CacheType; 79 typedef PackedCache<32-kPageShift, uint16_t> CacheType;
90 }; 80 };
91 81
92 // ------------------------------------------------------------------------- 82 // -------------------------------------------------------------------------
93 // Page-level allocator 83 // Page-level allocator
94 // * Eager coalescing 84 // * Eager coalescing
95 // 85 //
 96 // Heap for page-level allocation.  We allow allocating and freeing a 86 // Heap for page-level allocation.  We allow allocating and freeing a
 97 // contiguous run of pages (called a "span"). 87 // contiguous run of pages (called a "span").
98 // ------------------------------------------------------------------------- 88 // -------------------------------------------------------------------------
99 89
100 class PERFTOOLS_DLL_DECL PageHeap { 90 class PageHeap {
101 public: 91 public:
102 PageHeap(); 92 PageHeap();
103 93
104 // Allocate a run of "n" pages. Returns zero if out of memory. 94 // Allocate a run of "n" pages. Returns zero if out of memory.
105 // Caller should not pass "n == 0" -- instead, n should have 95 // Caller should not pass "n == 0" -- instead, n should have
106 // been rounded up already. 96 // been rounded up already.
107 Span* New(Length n); 97 Span* New(Length n);
108 98
109 // Delete the span "[p, p+n-1]". 99 // Delete the span "[p, p+n-1]".
110 // REQUIRES: span was returned by earlier call to New() and 100 // REQUIRES: span was returned by earlier call to New() and
111 // has not yet been deleted. 101 // has not yet been deleted.
112 void Delete(Span* span); 102 void Delete(Span* span);
113 103
114 // Mark an allocated span as being used for small objects of the 104 // Mark an allocated span as being used for small objects of the
115 // specified size-class. 105 // specified size-class.
116 // REQUIRES: span was returned by an earlier call to New() 106 // REQUIRES: span was returned by an earlier call to New()
117 // and has not yet been deleted. 107 // and has not yet been deleted.
118 void RegisterSizeClass(Span* span, size_t sc); 108 void RegisterSizeClass(Span* span, size_t sc);
119 109
120 // Split an allocated span into two spans: one of length "n" pages 110 // Split an allocated span into two spans: one of length "n" pages
121 // followed by another span of length "span->length - n" pages. 111 // followed by another span of length "span->length - n" pages.
122 // Modifies "*span" to point to the first span of length "n" pages. 112 // Modifies "*span" to point to the first span of length "n" pages.
123 // Returns a pointer to the second span. 113 // Returns a pointer to the second span.
124 // 114 //
125 // REQUIRES: "0 < n < span->length" 115 // REQUIRES: "0 < n < span->length"
126 // REQUIRES: span->location == IN_USE 116 // REQUIRES: span->location == IN_USE
127 // REQUIRES: span->sizeclass == 0 117 // REQUIRES: span->sizeclass == 0
128 Span* Split(Span* span, Length n); 118 Span* Split(Span* span, Length n);
129 119
130 // Return the descriptor for the specified page. Returns NULL if 120 // Return the descriptor for the specified page.
131 // this PageID was not allocated previously.
132 inline Span* GetDescriptor(PageID p) const { 121 inline Span* GetDescriptor(PageID p) const {
133 return reinterpret_cast<Span*>(pagemap_.get(p)); 122 return reinterpret_cast<Span*>(pagemap_.get(p));
134 } 123 }
135 124
136 // Dump state to stderr 125 // Dump state to stderr
137 void Dump(TCMalloc_Printer* out); 126 void Dump(TCMalloc_Printer* out);
138 127
139 // If this page heap is managing a range with starting page # >= start, 128 // Return number of bytes allocated from system
140 // store info about the range in *r and return true. Else return false. 129 inline uint64_t SystemBytes() const { return system_bytes_; }
141 bool GetNextRange(PageID start, base::MallocRange* r);
142 130
143 // Page heap statistics 131 inline uint64_t CommittedBytes() const { return committed_bytes_; }
144 struct Stats {
145 Stats() : system_bytes(0), free_bytes(0), unmapped_bytes(0) {}
146 uint64_t system_bytes; // Total bytes allocated from system
147 uint64_t free_bytes; // Total bytes on normal freelists
148 uint64_t unmapped_bytes; // Total bytes on returned freelists
149 uint64_t committed_bytes; // Bytes committed, always <= system_bytes_.
150 132
151 }; 133 // Return number of free bytes in heap
152 inline Stats stats() const { return stats_; } 134 uint64_t FreeBytes() const {
135 return (static_cast<uint64_t>(free_pages_) << kPageShift);
136 }
153 137
154 bool Check(); 138 bool Check();
155 // Like Check() but does some more comprehensive checking. 139 // Like Check() but does some more comprehensive checking.
156 bool CheckExpensive(); 140 bool CheckExpensive();
157 bool CheckList(Span* list, Length min_pages, Length max_pages, 141 bool CheckList(Span* list, Length min_pages, Length max_pages,
158 int freelist); // ON_NORMAL_FREELIST or ON_RETURNED_FREELIST 142 int freelist); // ON_NORMAL_FREELIST or ON_RETURNED_FREELIST
159 143
160 // Try to release at least num_pages for reuse by the OS. Returns 144 // Release all pages on the free list for reuse by the OS:
161 // the actual number of pages released, which may be less than 145 void ReleaseFreePages();
162 // num_pages if there weren't enough pages to release. The result
163 // may also be larger than num_pages since page_heap might decide to
164 // release one large range instead of fragmenting it into two
165 // smaller released and unreleased ranges.
166 Length ReleaseAtLeastNPages(Length num_pages);
167 146
168 // Return 0 if we have no information, or else the correct sizeclass for p. 147 // Return 0 if we have no information, or else the correct sizeclass for p.
169 // Reads and writes to pagemap_cache_ do not require locking. 148 // Reads and writes to pagemap_cache_ do not require locking.
170 // The entries are 64 bits on 64-bit hardware and 16 bits on 149 // The entries are 64 bits on 64-bit hardware and 16 bits on
171 // 32-bit hardware, and we don't mind raciness as long as each read of 150 // 32-bit hardware, and we don't mind raciness as long as each read of
172 // an entry yields a valid entry, not a partially updated entry. 151 // an entry yields a valid entry, not a partially updated entry.
173 size_t GetSizeClassIfCached(PageID p) const { 152 size_t GetSizeClassIfCached(PageID p) const {
174 return pagemap_cache_.GetOrDefault(p, 0); 153 return pagemap_cache_.GetOrDefault(p, 0);
175 } 154 }
176 void CacheSizeClass(PageID p, size_t cl) const { pagemap_cache_.Put(p, cl); } 155 void CacheSizeClass(PageID p, size_t cl) const { pagemap_cache_.Put(p, cl); }
177 156
178 private: 157 private:
179 // Allocates a big block of memory for the pagemap once we reach more than 158 // Allocates a big block of memory for the pagemap once we reach more than
180 // 128MB 159 // 128MB
181 static const size_t kPageMapBigAllocationThreshold = 128 << 20; 160 static const size_t kPageMapBigAllocationThreshold = 128 << 20;
182 161
183 // Minimum number of pages to fetch from system at a time. Must be 162 // Minimum number of pages to fetch from system at a time. Must be
184 // significantly bigger than kBlockSize to amortize system-call 163 // significantly bigger than kBlockSize to amortize system-call
 185 // significantly bigger than kBlockSize to amortize system-call 164 // overhead, and also to reduce external fragmentation.  Also, we
186 // should keep this value big because various incarnations of Linux 165 // should keep this value big because various incarnations of Linux
187 // have small limits on the number of mmap() regions per 166 // have small limits on the number of mmap() regions per
188 // address-space. 167 // address-space.
189 static const int kMinSystemAlloc = 1 << (20 - kPageShift); 168 static const int kMinSystemAlloc = 1 << (20 - kPageShift);
190 169
191 // For all span-lengths < kMaxPages we keep an exact-size list. 170 // For all span-lengths < kMaxPages we keep an exact-size list.
192 // REQUIRED: kMaxPages >= kMinSystemAlloc; 171 // REQUIRED: kMaxPages >= kMinSystemAlloc;
193 static const size_t kMaxPages = kMinSystemAlloc; 172 static const size_t kMaxPages = kMinSystemAlloc;
194 173
195 // Never delay scavenging for more than the following number of
196 // deallocated pages. With 4K pages, this comes to 4GB of
197 // deallocation.
198 // Chrome: Changed to 64MB
199 static const int kMaxReleaseDelay = 1 << 14;
200
201 // If there is nothing to release, wait for so many pages before
202 // scavenging again. With 4K pages, this comes to 1GB of memory.
203 // Chrome: Changed to 16MB
204 static const int kDefaultReleaseDelay = 1 << 12;
205
206 // Pick the appropriate map and cache types based on pointer size 174 // Pick the appropriate map and cache types based on pointer size
207 typedef MapSelector<8*sizeof(uintptr_t)>::Type PageMap; 175 typedef MapSelector<8*sizeof(uintptr_t)>::Type PageMap;
208 typedef MapSelector<8*sizeof(uintptr_t)>::CacheType PageMapCache; 176 typedef MapSelector<8*sizeof(uintptr_t)>::CacheType PageMapCache;
209 PageMap pagemap_; 177 PageMap pagemap_;
210 mutable PageMapCache pagemap_cache_; 178 mutable PageMapCache pagemap_cache_;
211 179
212 // We segregate spans of a given size into two circular linked 180 // We segregate spans of a given size into two circular linked
213 // lists: one for normal spans, and one for spans whose memory 181 // lists: one for normal spans, and one for spans whose memory
214 // has been returned to the system. 182 // has been returned to the system.
215 struct SpanList { 183 struct SpanList {
216 Span normal; 184 Span normal;
217 Span returned; 185 Span returned;
218 }; 186 };
219 187
220 // List of free spans of length >= kMaxPages 188 // List of free spans of length >= kMaxPages
221 SpanList large_; 189 SpanList large_;
222 190
223 // Array mapping from span length to a doubly linked list of free spans 191 // Array mapping from span length to a doubly linked list of free spans
224 SpanList free_[kMaxPages]; 192 SpanList free_[kMaxPages];
225 193
226 // Statistics on system, free, and unmapped bytes 194 // Number of pages kept in free lists
227 Stats stats_; 195 uintptr_t free_pages_;
196
197 // Bytes allocated from system
198 uint64_t system_bytes_;
199
200 // Bytes committed, always <= system_bytes_.
201 uint64_t committed_bytes_;
202
228 bool GrowHeap(Length n); 203 bool GrowHeap(Length n);
229 204
230 // REQUIRES: span->length >= n 205 // REQUIRES: span->length >= n
231 // REQUIRES: span->location != IN_USE 206 // REQUIRES: span->location != IN_USE
232 // Remove span from its free list, and move any leftover part of 207 // Remove span from its free list, and move any leftover part of
233 // span into appropriate free lists. Also update "span" to have 208 // span into appropriate free lists. Also update "span" to have
234 // length exactly "n" and mark it as non-free so it can be returned 209 // length exactly "n" and mark it as non-free so it can be returned
235 // to the client. After all that, decrease free_pages_ by n and 210 // to the client. After all that, decrease free_pages_ by n and
236 // return span. 211 // return span.
237 Span* Carve(Span* span, Length n); 212 Span* Carve(Span* span, Length n);
238 213
239 void RecordSpan(Span* span) { 214 void RecordSpan(Span* span) {
240 pagemap_.set(span->start, span); 215 pagemap_.set(span->start, span);
241 if (span->length > 1) { 216 if (span->length > 1) {
242 pagemap_.set(span->start + span->length - 1, span); 217 pagemap_.set(span->start + span->length - 1, span);
243 } 218 }
244 } 219 }
245 220
246 // Allocate a large span of length == n. If successful, returns a 221 // Allocate a large span of length == n. If successful, returns a
247 // span of exactly the specified length. Else, returns NULL. 222 // span of exactly the specified length. Else, returns NULL.
248 Span* AllocLarge(Length n); 223 Span* AllocLarge(Length n);
249 224
250 // Coalesce span with neighboring spans if possible, prepend to 225 #if defined(OS_LINUX)
251 // appropriate free list, and adjust stats. 226 // Coalesce span with neighboring spans if possible. Add the
252 void MergeIntoFreeList(Span* span); 227 // resulting span to the appropriate free list.
253 228 void AddToFreeList(Span* span);
229 #else // ! defined(OS_LINUX)
254 // Commit the span. 230 // Commit the span.
255 void CommitSpan(Span* span); 231 void CommitSpan(Span* span);
256 232
257 // Decommit the span. 233 // Decommit the span.
258 void DecommitSpan(Span* span); 234 void DecommitSpan(Span* span);
259 235 #endif // ! defined(OS_LINUX)
260 // Prepends span to appropriate free list, and adjusts stats.
261 void PrependToFreeList(Span* span);
262
263 // Removes span from its free list, and adjust stats.
264 void RemoveFromFreeList(Span* span);
265 236
266 // Incrementally release some memory to the system. 237 // Incrementally release some memory to the system.
267 // IncrementalScavenge(n) is called whenever n pages are freed. 238 // IncrementalScavenge(n) is called whenever n pages are freed.
268 void IncrementalScavenge(Length n); 239 void IncrementalScavenge(Length n);
269 240
270 // Release the last span on the normal portion of this list. 241 #if defined(OS_LINUX)
271 // Return the length of that span. 242 // Release all pages in the specified free list for reuse by the OS
 272 Length ReleaseLastNormalSpan(SpanList* slist); 243 // REQUIRES: list must be a "normal" list (i.e., not "returned")
273 244 void ReleaseFreeList(Span* list);
245 #else // ! defined(OS_LINUX)
246 // Releases all memory held in the given list's 'normal' freelist and adds
247 // it to the 'released' freelist.
248 void ReleaseFreeList(Span* list, Span* returned);
249 #endif // ! defined(OS_LINUX)
274 250
275 // Number of pages to deallocate before doing more scavenging 251 // Number of pages to deallocate before doing more scavenging
276 int64_t scavenge_counter_; 252 int64_t scavenge_counter_;
277 253
278 // Index of last free list where we released memory to the OS. 254 // Index of last free list we scavenged
279 int release_index_; 255 int scavenge_index_;
280 }; 256 };
281 257
282 } // namespace tcmalloc 258 } // namespace tcmalloc
283 259
284 #ifdef _MSC_VER
285 #pragma warning(pop)
286 #endif
287
288 #endif // TCMALLOC_PAGE_HEAP_H_ 260 #endif // TCMALLOC_PAGE_HEAP_H_
OLDNEW
« no previous file with comments | « third_party/tcmalloc/chromium/src/packed-cache-inl.h ('k') | third_party/tcmalloc/chromium/src/page_heap.cc » ('j') | no next file with comments »

Powered by Google App Engine
This is Rietveld 408576698