OLD | NEW |
1 // Copyright (c) 2008, Google Inc. | 1 // Copyright (c) 2008, Google Inc. |
2 // All rights reserved. | 2 // All rights reserved. |
3 // | 3 // |
4 // Redistribution and use in source and binary forms, with or without | 4 // Redistribution and use in source and binary forms, with or without |
5 // modification, are permitted provided that the following conditions are | 5 // modification, are permitted provided that the following conditions are |
6 // met: | 6 // met: |
7 // | 7 // |
8 // * Redistributions of source code must retain the above copyright | 8 // * Redistributions of source code must retain the above copyright |
9 // notice, this list of conditions and the following disclaimer. | 9 // notice, this list of conditions and the following disclaimer. |
10 // * Redistributions in binary form must reproduce the above | 10 // * Redistributions in binary form must reproduce the above |
(...skipping 46 matching lines...)
57 // you're porting to a system where you really can't get a stacktrace. | 57 // you're porting to a system where you really can't get a stacktrace. |
58 // Because we control the definition of GetStackTrace, all clients of | 58 // Because we control the definition of GetStackTrace, all clients of |
59 // GetStackTrace should #include us rather than stacktrace.h. | 59 // GetStackTrace should #include us rather than stacktrace.h. |
60 #ifdef NO_TCMALLOC_SAMPLES | 60 #ifdef NO_TCMALLOC_SAMPLES |
61 // We use #define so code compiles even if you #include stacktrace.h somehow. | 61 // We use #define so code compiles even if you #include stacktrace.h somehow. |
62 # define GetStackTrace(stack, depth, skip) (0) | 62 # define GetStackTrace(stack, depth, skip) (0) |
63 #else | 63 #else |
64 # include <google/stacktrace.h> | 64 # include <google/stacktrace.h> |
65 #endif | 65 #endif |
66 | 66 |
67 class TCMalloc_Printer; | |
68 namespace base { | 67 namespace base { |
69 struct MallocRange; | 68 struct MallocRange; |
70 } | 69 } |
71 | 70 |
72 namespace tcmalloc { | 71 namespace tcmalloc { |
73 | 72 |
74 // ------------------------------------------------------------------------- | 73 // ------------------------------------------------------------------------- |
75 // Map from page-id to per-page data | 74 // Map from page-id to per-page data |
76 // ------------------------------------------------------------------------- | 75 // ------------------------------------------------------------------------- |
77 | 76 |
78 // We use PageMap2<> for 32-bit and PageMap3<> for 64-bit machines. | 77 // We use PageMap2<> for 32-bit and PageMap3<> for 64-bit machines. |
79 // ...except... | |
80 // On Windows, we use TCMalloc_PageMap1_LazyCommit<> for 32-bit machines. | |
81 // We also use a simple one-level cache for hot PageID-to-sizeclass mappings, | 78 // We also use a simple one-level cache for hot PageID-to-sizeclass mappings, |
82 // because sometimes the sizeclass is all the information we need. | 79 // because sometimes the sizeclass is all the information we need. |
83 | 80 |
84 // Selector class -- general selector uses 3-level map | 81 // Selector class -- general selector uses 3-level map |
85 template <int BITS> class MapSelector { | 82 template <int BITS> class MapSelector { |
86 public: | 83 public: |
87 typedef TCMalloc_PageMap3<BITS-kPageShift> Type; | 84 typedef TCMalloc_PageMap3<BITS-kPageShift> Type; |
88 typedef PackedCache<BITS-kPageShift, uint64_t> CacheType; | 85 typedef PackedCache<BITS-kPageShift, uint64_t> CacheType; |
89 }; | 86 }; |
90 | 87 |
91 // A two-level map for 32-bit machines | 88 // A two-level map for 32-bit machines |
92 template <> class MapSelector<32> { | 89 template <> class MapSelector<32> { |
93 public: | 90 public: |
94 #ifdef WIN32 | |
95 // A flat map for 32-bit machines (with lazy commit of memory). | |
96 typedef TCMalloc_PageMap1_LazyCommit<32-kPageShift> Type; | |
97 #else | |
98 // A two-level map for 32-bit machines | |
99 typedef TCMalloc_PageMap2<32-kPageShift> Type; | 91 typedef TCMalloc_PageMap2<32-kPageShift> Type; |
100 #endif | |
101 typedef PackedCache<32-kPageShift, uint16_t> CacheType; | 92 typedef PackedCache<32-kPageShift, uint16_t> CacheType; |
102 }; | 93 }; |
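
For readers unfamiliar with the PageMap2<> family, the idea is a two-level radix tree: the high bits of a page id pick a leaf array, the low bits pick a slot inside it, so memory is only paid for address ranges that are actually populated. The sketch below is illustrative only; the even BITS/2 split and the nothrow-new leaf allocation are assumptions (the real classes carve leaves out of tcmalloc's internal allocator):

```cpp
#include <cstdint>
#include <cstring>
#include <new>

// Minimal two-level page map sketch. Not the real TCMalloc_PageMap2.
template <int BITS>
class TwoLevelPageMap {
 public:
  TwoLevelPageMap() { memset(root_, 0, sizeof(root_)); }

  // Returns NULL for pages that were never set.
  void* get(uintptr_t page) const {
    Leaf* leaf = root_[page >> kLeafBits];
    return leaf ? leaf->values[page & (kLeafLength - 1)] : NULL;
  }

  // Lazily allocates the leaf; returns false on allocation failure.
  bool set(uintptr_t page, void* v) {
    Leaf*& leaf = root_[page >> kLeafBits];
    if (leaf == NULL) {
      leaf = new (std::nothrow) Leaf();  // value-initialized: all slots NULL
      if (leaf == NULL) return false;
    }
    leaf->values[page & (kLeafLength - 1)] = v;
    return true;
  }

 private:
  static const int kLeafBits = BITS / 2;  // illustrative split of the bits
  static const uintptr_t kLeafLength = uintptr_t(1) << kLeafBits;
  struct Leaf { void* values[kLeafLength]; };
  Leaf* root_[uintptr_t(1) << (BITS - kLeafBits)];  // top-level pointer array
};
```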
103 | 94 |
104 // ------------------------------------------------------------------------- | 95 // ------------------------------------------------------------------------- |
105 // Page-level allocator | 96 // Page-level allocator |
106 // * Eager coalescing | 97 // * Eager coalescing |
107 // | 98 // |
108 // Heap for page-level allocation. We allow allocating and freeing a | 99 // Heap for page-level allocation. We allow allocating and freeing a |
109 // contiguous run of pages (called a "span"). | 100 // contiguous run of pages (called a "span"). |
110 // ------------------------------------------------------------------------- | 101 // ------------------------------------------------------------------------- |
(...skipping 27 matching lines...)
138 // REQUIRES: span->location == IN_USE | 129 // REQUIRES: span->location == IN_USE |
139 // REQUIRES: span->sizeclass == 0 | 130 // REQUIRES: span->sizeclass == 0 |
140 Span* Split(Span* span, Length n); | 131 Span* Split(Span* span, Length n); |
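
A hypothetical use of Split(), honoring the two REQUIRES clauses above. The names `heap`, `p`, and `n` are assumed for illustration, and since Split()'s full description is in the elided lines, the return-value convention shown (the leftover tail) is an assumption here:

```cpp
// Hypothetical caller: carve the first n pages off a live large object.
// Per the REQUIRES clauses, the span must be IN_USE with sizeclass 0.
Span* span = heap->GetDescriptor(p);
if (span != NULL && span->location == Span::IN_USE && span->sizeclass == 0) {
  Span* rest = heap->Split(span, n);  // span keeps n pages; rest is the tail
  // ... rest can now be freed independently of span ...
}
```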
141 | 132 |
142 // Return the descriptor for the specified page. Returns NULL if | 133 // Return the descriptor for the specified page. Returns NULL if |
143 // this PageID was not allocated previously. | 134 // this PageID was not allocated previously. |
144 inline Span* GetDescriptor(PageID p) const { | 135 inline Span* GetDescriptor(PageID p) const { |
145 return reinterpret_cast<Span*>(pagemap_.get(p)); | 136 return reinterpret_cast<Span*>(pagemap_.get(p)); |
146 } | 137 } |
147 | 138 |
148 // Dump state to stderr | |
149 void Dump(TCMalloc_Printer* out); | |
150 | |
151 // If this page heap is managing a range with starting page # >= start, | 139 // If this page heap is managing a range with starting page # >= start, |
152 // store info about the range in *r and return true. Else return false. | 140 // store info about the range in *r and return true. Else return false. |
153 bool GetNextRange(PageID start, base::MallocRange* r); | 141 bool GetNextRange(PageID start, base::MallocRange* r); |
154 | 142 |
155 // Page heap statistics | 143 // Page heap statistics |
156 struct Stats { | 144 struct Stats { |
157 Stats() : system_bytes(0), free_bytes(0), unmapped_bytes(0) {} | 145 Stats() : system_bytes(0), free_bytes(0), unmapped_bytes(0) {} |
158 uint64_t system_bytes; // Total bytes allocated from system | 146 uint64_t system_bytes; // Total bytes allocated from system |
159 uint64_t free_bytes; // Total bytes on normal freelists | 147 uint64_t free_bytes; // Total bytes on normal freelists |
160 uint64_t unmapped_bytes; // Total bytes on returned freelists | 148 uint64_t unmapped_bytes; // Total bytes on returned freelists |
161 uint64_t committed_bytes; // Bytes committed, always <= system_bytes. |
162 | |
163 }; | 149 }; |
164 inline Stats stats() const { return stats_; } | 150 inline Stats stats() const { return stats_; } |
165 void GetClassSizes(int64 class_sizes_normal[kMaxPages], | 151 |
166 int64 class_sizes_returned[kMaxPages], | 152 struct SmallSpanStats { |
167 int64* normal_pages_in_spans, | 153 // For each free list of small spans, the length (in spans) of the |
168 int64* returned_pages_in_spans); | 154 // normal and returned free lists for that size. |
| 155 int64 normal_length[kMaxPages]; |
| 156 int64 returned_length[kMaxPages]; |
| 157 }; |
| 158 void GetSmallSpanStats(SmallSpanStats* result); |
| 159 |
| 160 // Stats for free large spans (i.e., spans with more than kMaxPages pages). |
| 161 struct LargeSpanStats { |
| 162 int64 spans; // Number of such spans |
| 163 int64 normal_pages; // Combined page length of normal large spans |
| 164 int64 returned_pages; // Combined page length of unmapped spans |
| 165 }; |
| 166 void GetLargeSpanStats(LargeSpanStats* result); |
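
Taken together, the two new structs let a caller reconstruct the total number of free pages without touching PageHeap internals. A hedged sketch, assuming `normal_length[i]`/`returned_length[i]` count spans of exactly i pages (matching the `free_[kMaxPages]` indexing further down), with `heap` an assumed PageHeap pointer:

```cpp
// Sketch: total free (normal + returned) pages via the new stats API.
PageHeap::SmallSpanStats small;
PageHeap::LargeSpanStats large;
heap->GetSmallSpanStats(&small);
heap->GetLargeSpanStats(&large);

int64 free_pages = large.normal_pages + large.returned_pages;
for (int i = 0; i < kMaxPages; i++) {
  // Each list at index i holds spans of i pages apiece.
  free_pages += i * (small.normal_length[i] + small.returned_length[i]);
}
```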
169 | 167 |
170 bool Check(); | 168 bool Check(); |
171 // Like Check() but does some more comprehensive checking. | 169 // Like Check() but does some more comprehensive checking. |
172 bool CheckExpensive(); | 170 bool CheckExpensive(); |
173 bool CheckList(Span* list, Length min_pages, Length max_pages, | 171 bool CheckList(Span* list, Length min_pages, Length max_pages, |
174 int freelist); // ON_NORMAL_FREELIST or ON_RETURNED_FREELIST | 172 int freelist); // ON_NORMAL_FREELIST or ON_RETURNED_FREELIST |
175 | 173 |
176 // Try to release at least num_pages for reuse by the OS. Returns | 174 // Try to release at least num_pages for reuse by the OS. Returns |
177 // the actual number of pages released, which may be less than | 175 // the actual number of pages released, which may be less than |
178 // num_pages if there weren't enough pages to release. The result | 176 // num_pages if there weren't enough pages to release. The result |
(...skipping 22 matching lines...)
201 // overhead, and also to reduce external fragmentation. Also, we | 199 // overhead, and also to reduce external fragmentation. Also, we |
202 // should keep this value big because various incarnations of Linux | 200 // should keep this value big because various incarnations of Linux |
203 // have small limits on the number of mmap() regions per | 201 // have small limits on the number of mmap() regions per |
204 // address-space. | 202 // address-space. |
205 // REQUIRED: kMinSystemAlloc <= kMaxPages; | 203 // REQUIRED: kMinSystemAlloc <= kMaxPages; |
206 static const int kMinSystemAlloc = kMaxPages; | 204 static const int kMinSystemAlloc = kMaxPages; |
207 | 205 |
208 // Never delay scavenging for more than the following number of | 206 // Never delay scavenging for more than the following number of |
209 // deallocated pages. With 4K pages, this comes to 4GB of | 207 // deallocated pages. With 4K pages, this comes to 4GB of |
210 // deallocation. | 208 // deallocation. |
211 // Chrome: Changed to 64MB | 209 static const int kMaxReleaseDelay = 1 << 20; |
212 static const int kMaxReleaseDelay = 1 << 14; | |
213 | 210 |
214 // If there is nothing to release, wait for so many pages before | 211 // If there is nothing to release, wait for so many pages before |
215 // scavenging again. With 4K pages, this comes to 1GB of memory. | 212 // scavenging again. With 4K pages, this comes to 1GB of memory. |
216 // Chrome: Changed to 16MB | 213 static const int kDefaultReleaseDelay = 1 << 18; |
217 static const int kDefaultReleaseDelay = 1 << 12; | |
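
The byte figures in the two comments above are just page counts times the 4KB page size; the Chrome overrides in the old column shrink them to 64MB (1 << 14 pages) and 16MB (1 << 12 pages) respectively. A compile-time restatement of the upstream values, with kPageShift == 12 assumed:

```cpp
// Compile-time restatement of the comments above, assuming 4KB pages.
static const uint64_t kAssumedPageSize = 1ull << 12;  // 4KB
static_assert((1ull << 20) * kAssumedPageSize == 4ull << 30,
              "kMaxReleaseDelay: 1M pages * 4KB = 4GB");
static_assert((1ull << 18) * kAssumedPageSize == 1ull << 30,
              "kDefaultReleaseDelay: 256K pages * 4KB = 1GB");
```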
218 | 214 |
219 // Pick the appropriate map and cache types based on pointer size | 215 // Pick the appropriate map and cache types based on pointer size |
220 typedef MapSelector<kAddressBits>::Type PageMap; | 216 typedef MapSelector<kAddressBits>::Type PageMap; |
221 typedef MapSelector<kAddressBits>::CacheType PageMapCache; | 217 typedef MapSelector<kAddressBits>::CacheType PageMapCache; |
222 PageMap pagemap_; | 218 PageMap pagemap_; |
223 mutable PageMapCache pagemap_cache_; | 219 mutable PageMapCache pagemap_cache_; |
224 | 220 |
225 // We segregate spans of a given size into two circular linked | 221 // We segregate spans of a given size into two circular linked |
226 // lists: one for normal spans, and one for spans whose memory | 222 // lists: one for normal spans, and one for spans whose memory |
227 // has been returned to the system. | 223 // has been returned to the system. |
228 struct SpanList { | 224 struct SpanList { |
229 Span normal; | 225 Span normal; |
230 Span returned; | 226 Span returned; |
231 }; | 227 }; |
232 | 228 |
233 // List of free spans of length >= kMaxPages | 229 // List of free spans of length >= kMaxPages |
234 SpanList large_; | 230 SpanList large_; |
235 | 231 |
236 // Array mapping from span length to a doubly linked list of free spans | 232 // Array mapping from span length to a doubly linked list of free spans |
237 SpanList free_[kMaxPages]; | 233 SpanList free_[kMaxPages]; |
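
The selection rule these two members imply, written out as a hypothetical helper (the real code inlines this choice rather than calling a function):

```cpp
// Hypothetical helper: where a free span of length n belongs. Spans
// shorter than kMaxPages live in the exact-size list free_[n]; all
// longer spans share large_.
Span* FreeListHead(Length n, bool returned) {
  SpanList* lists = (n < kMaxPages) ? &free_[n] : &large_;
  return returned ? &lists->returned : &lists->normal;
}
```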
238 | 234 |
239 // Statistics on system, free, and unmapped bytes | 235 // Statistics on system, free, and unmapped bytes |
240 Stats stats_; | 236 Stats stats_; |
| 237 |
241 Span* SearchFreeAndLargeLists(Length n); | 238 Span* SearchFreeAndLargeLists(Length n); |
242 | 239 |
243 bool GrowHeap(Length n); | 240 bool GrowHeap(Length n); |
244 | 241 |
245 // REQUIRES: span->length >= n | 242 // REQUIRES: span->length >= n |
246 // REQUIRES: span->location != IN_USE | 243 // REQUIRES: span->location != IN_USE |
247 // Remove span from its free list, and move any leftover part of | 244 // Remove span from its free list, and move any leftover part of |
248 // span into appropriate free lists. Also update "span" to have | 245 // span into appropriate free lists. Also update "span" to have |
249 // length exactly "n" and mark it as non-free so it can be returned | 246 // length exactly "n" and mark it as non-free so it can be returned |
250 // to the client. After all that, decrease free_pages_ by n and | 247 // to the client. After all that, decrease free_pages_ by n and |
251 // return span. | 248 // return span. |
252 Span* Carve(Span* span, Length n); | 249 Span* Carve(Span* span, Length n); |
253 | 250 |
254 void RecordSpan(Span* span) { | 251 void RecordSpan(Span* span) { |
255 pagemap_.set(span->start, span); | 252 pagemap_.set(span->start, span); |
256 if (span->length > 1) { | 253 if (span->length > 1) { |
257 pagemap_.set(span->start + span->length - 1, span); | 254 pagemap_.set(span->start + span->length - 1, span); |
258 } | 255 } |
259 } | 256 } |
260 | 257 |
261 // Allocate a large span of length == n. If successful, returns a | 258 // Allocate a large span of length == n. If successful, returns a |
262 // span of exactly the specified length. Else, returns NULL. | 259 // span of exactly the specified length. Else, returns NULL. |
263 Span* AllocLarge(Length n); | 260 Span* AllocLarge(Length n); |
264 | 261 |
265 // Coalesce span with neighboring spans if possible, prepend to | 262 // Coalesce span with neighboring spans if possible, prepend to |
266 // appropriate free list, and adjust stats. | 263 // appropriate free list, and adjust stats. |
267 void MergeIntoFreeList(Span* span); | 264 void MergeIntoFreeList(Span* span); |
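
Coalescing stays O(1) because RecordSpan() above indexes only a span's first and last pages: one descriptor lookup on either side of a span finds any free neighbor. A sketch of the technique follows; it is an illustration, not the actual body of MergeIntoFreeList():

```cpp
// Sketch of eager coalescing around `span`, which is about to be freed.
const PageID p = span->start;
const Length n = span->length;
Span* prev = GetDescriptor(p - 1);   // last page of the left neighbor
if (prev != NULL && prev->location == span->location) {
  span->start -= prev->length;       // absorb the left neighbor
  span->length += prev->length;
  RemoveFromFreeList(prev);
  // ... free prev's descriptor, then RecordSpan(span) ...
}
Span* next = GetDescriptor(p + n);   // first page of the right neighbor
if (next != NULL && next->location == span->location) {
  span->length += next->length;      // absorb the right neighbor
  RemoveFromFreeList(next);
  // ... free next's descriptor, then RecordSpan(span) ...
}
PrependToFreeList(span);             // prepend and adjust stats
```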
268 | 265 |
269 // Commit the span. | |
270 void CommitSpan(Span* span); | |
271 | |
272 // Decommit the span. | |
273 void DecommitSpan(Span* span); | |
274 | |
275 // Prepends span to appropriate free list, and adjusts stats. | 266 // Prepends span to appropriate free list, and adjusts stats. |
276 void PrependToFreeList(Span* span); | 267 void PrependToFreeList(Span* span); |
277 | 268 |
278 // Removes span from its free list, and adjust stats. | 269 // Removes span from its free list, and adjust stats. |
279 void RemoveFromFreeList(Span* span); | 270 void RemoveFromFreeList(Span* span); |
280 | 271 |
281 // Incrementally release some memory to the system. | 272 // Incrementally release some memory to the system. |
282 // IncrementalScavenge(n) is called whenever n pages are freed. | 273 // IncrementalScavenge(n) is called whenever n pages are freed. |
283 void IncrementalScavenge(Length n); | 274 void IncrementalScavenge(Length n); |
284 | 275 |
285 // Release the last span on the normal portion of this list. | 276 // Release the last span on the normal portion of this list. |
286 // Return the length of that span. | 277 // Return the length of that span. |
287 Length ReleaseLastNormalSpan(SpanList* slist); | 278 Length ReleaseLastNormalSpan(SpanList* slist); |
288 | 279 |
289 | 280 |
290 // Number of pages to deallocate before doing more scavenging | 281 // Number of pages to deallocate before doing more scavenging |
291 int64_t scavenge_counter_; | 282 int64_t scavenge_counter_; |
292 | 283 |
293 // Index of last free list where we released memory to the OS. | 284 // Index of last free list where we released memory to the OS. |
294 int release_index_; | 285 int release_index_; |
295 }; | 286 }; |
296 | 287 |
297 } // namespace tcmalloc | 288 } // namespace tcmalloc |
298 | 289 |
299 #ifdef _MSC_VER | 290 #ifdef _MSC_VER |
300 #pragma warning(pop) | 291 #pragma warning(pop) |
301 #endif | 292 #endif |
302 | 293 |
303 #endif // TCMALLOC_PAGE_HEAP_H_ | 294 #endif // TCMALLOC_PAGE_HEAP_H_ |