OLD | NEW |
1 // Copyright (c) 2008, Google Inc. | 1 // Copyright (c) 2008, Google Inc. |
2 // All rights reserved. | 2 // All rights reserved. |
3 // | 3 // |
4 // Redistribution and use in source and binary forms, with or without | 4 // Redistribution and use in source and binary forms, with or without |
5 // modification, are permitted provided that the following conditions are | 5 // modification, are permitted provided that the following conditions are |
6 // met: | 6 // met: |
7 // | 7 // |
8 // * Redistributions of source code must retain the above copyright | 8 // * Redistributions of source code must retain the above copyright |
9 // notice, this list of conditions and the following disclaimer. | 9 // notice, this list of conditions and the following disclaimer. |
10 // * Redistributions in binary form must reproduce the above | 10 // * Redistributions in binary form must reproduce the above |
(...skipping 57 matching lines...)
68 struct MallocRange; | 68 struct MallocRange; |
69 } | 69 } |
70 | 70 |
71 namespace tcmalloc { | 71 namespace tcmalloc { |
72 | 72 |
73 // ------------------------------------------------------------------------- | 73 // ------------------------------------------------------------------------- |
74 // Map from page-id to per-page data | 74 // Map from page-id to per-page data |
75 // ------------------------------------------------------------------------- | 75 // ------------------------------------------------------------------------- |
76 | 76 |
77 // We use PageMap2<> for 32-bit and PageMap3<> for 64-bit machines. | 77 // We use PageMap2<> for 32-bit and PageMap3<> for 64-bit machines. |
| 78 // ...except... |
| 79 // On Windows, we use TCMalloc_PageMap1_LazyCommit<> for 32-bit machines. |
78 // We also use a simple one-level cache for hot PageID-to-sizeclass mappings, | 80 // We also use a simple one-level cache for hot PageID-to-sizeclass mappings, |
79 // because sometimes the sizeclass is all the information we need. | 81 // because sometimes the sizeclass is all the information we need. |
80 | 82 |
81 // Selector class -- general selector uses 3-level map | 83 // Selector class -- general selector uses 3-level map |
82 template <int BITS> class MapSelector { | 84 template <int BITS> class MapSelector { |
83 public: | 85 public: |
84 typedef TCMalloc_PageMap3<BITS-kPageShift> Type; | 86 typedef TCMalloc_PageMap3<BITS-kPageShift> Type; |
85 typedef PackedCache<BITS-kPageShift, uint64_t> CacheType; | 87 typedef PackedCache<BITS-kPageShift, uint64_t> CacheType; |
86 }; | 88 }; |
87 | 89 |
88 // A two-level map for 32-bit machines | 90 // A two-level map for 32-bit machines |
89 template <> class MapSelector<32> { | 91 template <> class MapSelector<32> { |
90 public: | 92 public: |
| 93 #ifdef WIN32 |
| 94 // A flat map for 32-bit machines (with lazy commit of memory). |
| 95 typedef TCMalloc_PageMap1_LazyCommit<32-kPageShift> Type; |
| 96 #else |
| 97 // A two-level map for 32-bit machines |
91 typedef TCMalloc_PageMap2<32-kPageShift> Type; | 98 typedef TCMalloc_PageMap2<32-kPageShift> Type; |
| 99 #endif |
92 typedef PackedCache<32-kPageShift, uint16_t> CacheType; | 100 typedef PackedCache<32-kPageShift, uint16_t> CacheType; |
93 }; | 101 }; |
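
Both selected types expose the same get/set interface over page numbers; PageMap2 and PageMap3 are just radix trees of different depth, so only the slices of the address space actually touched get backing nodes. As a rough illustration of the two-level case, here is a minimal sketch (not the real TCMalloc_PageMap2, which allocates nodes through a custom arena and supports Ensure/PreallocateMoreMemory; the class name and bit split below are illustrative only):

    #include <stdint.h>
    #include <string.h>

    template <int BITS>
    class SimplePageMap2 {   // hypothetical name; sketch of the idea only
     private:
      static const int LEAF_BITS = BITS / 2;          // low bits pick a slot
      static const int ROOT_BITS = BITS - LEAF_BITS;  // high bits pick a leaf
      static const int LEAF_LENGTH = 1 << LEAF_BITS;
      static const int ROOT_LENGTH = 1 << ROOT_BITS;

      struct Leaf { void* values[LEAF_LENGTH]; };
      Leaf* root_[ROOT_LENGTH];   // second level is allocated lazily

     public:
      SimplePageMap2() { memset(root_, 0, sizeof(root_)); }

      void* get(uintptr_t k) const {
        const uintptr_t i1 = k >> LEAF_BITS;
        return root_[i1] ? root_[i1]->values[k & (LEAF_LENGTH - 1)] : 0;
      }

      void set(uintptr_t k, void* v) {
        const uintptr_t i1 = k >> LEAF_BITS;
        if (root_[i1] == 0) {
          root_[i1] = new Leaf;
          memset(root_[i1], 0, sizeof(Leaf));
        }
        root_[i1]->values[k & (LEAF_LENGTH - 1)] = v;
      }
    };

The LazyCommit variant picked under WIN32 goes the other way: one flat array covering the whole 32-bit page space, kept cheap by reserving the array's address range up front and committing pieces of it only on first use.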
94 | 102 |
95 // ------------------------------------------------------------------------- | 103 // ------------------------------------------------------------------------- |
96 // Page-level allocator | 104 // Page-level allocator |
97 // * Eager coalescing | 105 // * Eager coalescing |
98 // | 106 // |
99 // Heap for page-level allocation. We allow allocating and freeing | 107 // Heap for page-level allocation. We allow allocating and freeing |
100 // contiguous runs of pages (called a "span"). | 108 // contiguous runs of pages (called a "span"). |
101 // ------------------------------------------------------------------------- | 109 // ------------------------------------------------------------------------- |
(...skipping 37 matching lines...)
139 // If this page heap is managing a range with starting page # >= start, | 147 // If this page heap is managing a range with starting page # >= start, |
140 // store info about the range in *r and return true. Else return false. | 148 // store info about the range in *r and return true. Else return false. |
141 bool GetNextRange(PageID start, base::MallocRange* r); | 149 bool GetNextRange(PageID start, base::MallocRange* r); |
142 | 150 |
143 // Page heap statistics | 151 // Page heap statistics |
144 struct Stats { | 152 struct Stats { |
145 Stats() : system_bytes(0), free_bytes(0), unmapped_bytes(0) {} | 153 Stats() : system_bytes(0), free_bytes(0), unmapped_bytes(0), committed_bytes(0) {} |
146 uint64_t system_bytes; // Total bytes allocated from system | 154 uint64_t system_bytes; // Total bytes allocated from system |
147 uint64_t free_bytes; // Total bytes on normal freelists | 155 uint64_t free_bytes; // Total bytes on normal freelists |
148 uint64_t unmapped_bytes; // Total bytes on returned freelists | 156 uint64_t unmapped_bytes; // Total bytes on returned freelists |
 | 157 uint64_t committed_bytes; // Bytes committed, always <= system_bytes. |
| 158 |
149 }; | 159 }; |
150 inline Stats stats() const { return stats_; } | 160 inline Stats stats() const { return stats_; } |
151 | 161 |
152 struct SmallSpanStats { | 162 struct SmallSpanStats { |
153 // For each free list of small spans, the length (in spans) of the | 163 // For each free list of small spans, the length (in spans) of the |
154 // normal and returned free lists for that size. | 164 // normal and returned free lists for that size. |
155 int64 normal_length[kMaxPages]; | 165 int64 normal_length[kMaxPages]; |
156 int64 returned_length[kMaxPages]; | 166 int64 returned_length[kMaxPages]; |
157 }; | 167 }; |
158 void GetSmallSpanStats(SmallSpanStats* result); | 168 void GetSmallSpanStats(SmallSpanStats* result); |
(...skipping 40 matching lines...)
199 // overhead, and also to reduce external fragmentation. Also, we | 209 // overhead, and also to reduce external fragmentation. Also, we |
200 // should keep this value big because various incarnations of Linux | 210 // should keep this value big because various incarnations of Linux |
201 // have small limits on the number of mmap() regions per | 211 // have small limits on the number of mmap() regions per |
202 // address-space. | 212 // address-space. |
203 // REQUIRED: kMinSystemAlloc <= kMaxPages; | 213 // REQUIRED: kMinSystemAlloc <= kMaxPages; |
204 static const int kMinSystemAlloc = kMaxPages; | 214 static const int kMinSystemAlloc = kMaxPages; |
205 | 215 |
206 // Never delay scavenging for more than the following number of | 216 // Never delay scavenging for more than the following number of |
207 // deallocated pages. With 4K pages, this comes to 4GB of | 217 // deallocated pages. With 4K pages, this comes to 4GB of |
208 // deallocation. | 218 // deallocation. |
209 static const int kMaxReleaseDelay = 1 << 20; | 219 // Chrome: Changed to 64MB |
| 220 static const int kMaxReleaseDelay = 1 << 14; |
210 | 221 |
211 // If there is nothing to release, wait for so many pages before | 222 // If there is nothing to release, wait for so many pages before |
212 // scavenging again. With 4K pages, this comes to 1GB of memory. | 223 // scavenging again. With 4K pages, this comes to 1GB of memory. |
213 static const int kDefaultReleaseDelay = 1 << 18; | 224 // Chrome: Changed to 16MB |
| 225 static const int kDefaultReleaseDelay = 1 << 12; |
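
Both constants are counts of pages, so the byte figures in the comments follow directly from the 4K page size the comments assume (kPageShift == 12). A compile-time spot check of that arithmetic (C++11 static_assert, purely illustrative):

    #include <stdint.h>

    // Old upstream values: 1 << 20 pages = 4 GiB, 1 << 18 pages = 1 GiB.
    static_assert((uint64_t(1) << (20 + 12)) == (uint64_t(4) << 30), "4GB");
    static_assert((uint64_t(1) << (18 + 12)) == (uint64_t(1) << 30), "1GB");
    // Chrome values: 1 << 14 pages = 64 MiB, 1 << 12 pages = 16 MiB.
    static_assert((uint64_t(1) << (14 + 12)) == (uint64_t(64) << 20), "64MB");
    static_assert((uint64_t(1) << (12 + 12)) == (uint64_t(16) << 20), "16MB");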
214 | 226 |
215 // Pick the appropriate map and cache types based on pointer size | 227 // Pick the appropriate map and cache types based on pointer size |
216 typedef MapSelector<kAddressBits>::Type PageMap; | 228 typedef MapSelector<kAddressBits>::Type PageMap; |
217 typedef MapSelector<kAddressBits>::CacheType PageMapCache; | 229 typedef MapSelector<kAddressBits>::CacheType PageMapCache; |
218 PageMap pagemap_; | 230 PageMap pagemap_; |
219 mutable PageMapCache pagemap_cache_; | 231 mutable PageMapCache pagemap_cache_; |
220 | 232 |
221 // We segregate spans of a given size into two circular linked | 233 // We segregate spans of a given size into two circular linked |
222 // lists: one for normal spans, and one for spans whose memory | 234 // lists: one for normal spans, and one for spans whose memory |
223 // has been returned to the system. | 235 // has been returned to the system. |
224 struct SpanList { | 236 struct SpanList { |
225 Span normal; | 237 Span normal; |
226 Span returned; | 238 Span returned; |
227 }; | 239 }; |
228 | 240 |
229 // List of free spans of length >= kMaxPages | 241 // List of free spans of length >= kMaxPages |
230 SpanList large_; | 242 SpanList large_; |
231 | 243 |
232 // Array mapping from span length to a doubly linked list of free spans | 244 // Array mapping from span length to a doubly linked list of free spans |
233 SpanList free_[kMaxPages]; | 245 SpanList free_[kMaxPages]; |
234 | 246 |
235 // Statistics on system, free, and unmapped bytes | 247 // Statistics on system, free, unmapped, and committed bytes |
236 Stats stats_; | 248 Stats stats_; |
237 | |
238 Span* SearchFreeAndLargeLists(Length n); | 249 Span* SearchFreeAndLargeLists(Length n); |
239 | 250 |
240 bool GrowHeap(Length n); | 251 bool GrowHeap(Length n); |
241 | 252 |
242 // REQUIRES: span->length >= n | 253 // REQUIRES: span->length >= n |
243 // REQUIRES: span->location != IN_USE | 254 // REQUIRES: span->location != IN_USE |
244 // Remove span from its free list, and move any leftover part of | 255 // Remove span from its free list, and move any leftover part of |
245 // span into appropriate free lists. Also update "span" to have | 256 // span into appropriate free lists. Also update "span" to have |
246 // length exactly "n" and mark it as non-free so it can be returned | 257 // length exactly "n" and mark it as non-free so it can be returned |
247 // to the client. After all that, adjust the free-byte stats and | 258 // to the client. After all that, adjust the free-byte stats and |
248 // return span. | 259 // return span. |
249 Span* Carve(Span* span, Length n); | 260 Span* Carve(Span* span, Length n); |
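
Condensed, the contract in that comment comes out to the following shape (a sketch, not the exact implementation; NewSpan is assumed to be the span-constructor helper, and event hooks and consistency checks are elided):

    // Split "span" so the returned piece is exactly n pages; the leftover
    // tail keeps the original normal/returned location and goes back on a
    // free list.
    Span* PageHeap::Carve(Span* span, Length n) {
      const int old_location = span->location;
      RemoveFromFreeList(span);
      span->location = Span::IN_USE;

      const Length extra = span->length - n;
      if (extra > 0) {
        Span* leftover = NewSpan(span->start + n, extra);  // assumed helper
        leftover->location = old_location;
        RecordSpan(leftover);
        PrependToFreeList(leftover);  // no coalescing candidates possible
        span->length = n;
        pagemap_.set(span->start + n - 1, span);  // span's new last page
      }
      return span;
    }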
250 | 261 |
251 void RecordSpan(Span* span) { | 262 void RecordSpan(Span* span) { |
252 pagemap_.set(span->start, span); | 263 pagemap_.set(span->start, span); |
253 if (span->length > 1) { | 264 if (span->length > 1) { |
254 pagemap_.set(span->start + span->length - 1, span); | 265 pagemap_.set(span->start + span->length - 1, span); |
255 } | 266 } |
256 } | 267 } |
257 | 268 |
258 // Allocate a large span of length == n. If successful, returns a | 269 // Allocate a large span of length == n. If successful, returns a |
259 // span of exactly the specified length. Else, returns NULL. | 270 // span of exactly the specified length. Else, returns NULL. |
260 Span* AllocLarge(Length n); | 271 Span* AllocLarge(Length n); |
261 | 272 |
262 // Coalesce span with neighboring spans if possible, prepend to | 273 // Coalesce span with neighboring spans if possible, prepend to |
263 // appropriate free list, and adjust stats. | 274 // appropriate free list, and adjust stats. |
264 void MergeIntoFreeList(Span* span); | 275 void MergeIntoFreeList(Span* span); |
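
RecordSpan stores the span pointer at only its first and last page, and this is the function that cashes that in: the pages immediately before and after a freed span are probed in the pagemap to find coalescing candidates in O(1). A sketch of that neighbor probe (assuming DeleteSpan as the span-destructor helper; stats updates and the normal/returned distinction are elided):

    void PageHeap::MergeIntoFreeList(Span* span) {
      const PageID p = span->start;
      const Length n = span->length;

      // Left neighbor is whatever span covers page p-1 (its last page).
      Span* prev = reinterpret_cast<Span*>(pagemap_.get(p - 1));
      if (prev != 0 && prev->location != Span::IN_USE) {
        span->start -= prev->length;
        span->length += prev->length;
        RemoveFromFreeList(prev);
        DeleteSpan(prev);                 // assumed helper
        pagemap_.set(span->start, span);  // re-record the new first page
      }

      // Right neighbor is whatever span starts at page p+n.
      Span* next = reinterpret_cast<Span*>(pagemap_.get(p + n));
      if (next != 0 && next->location != Span::IN_USE) {
        span->length += next->length;
        RemoveFromFreeList(next);
        DeleteSpan(next);
        pagemap_.set(span->start + span->length - 1, span);  // new last page
      }

      PrependToFreeList(span);
    }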
265 | 276 |
| 277 // Commit the span. |
| 278 void CommitSpan(Span* span); |
| 279 |
| 280 // Decommit the span. |
| 281 void DecommitSpan(Span* span); |
| 282 |
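
These two additions are what keep committed_bytes honest: committing a span's pages raises it by the span's byte length, decommitting lowers it. A plausible shape for the bookkeeping (a sketch assuming TCMalloc_SystemCommit / TCMalloc_SystemRelease as the underlying system-allocator hooks; error handling omitted):

    void PageHeap::CommitSpan(Span* span) {
      TCMalloc_SystemCommit(
          reinterpret_cast<void*>(span->start << kPageShift),
          static_cast<size_t>(span->length) << kPageShift);
      stats_.committed_bytes += static_cast<uint64_t>(span->length) << kPageShift;
    }

    void PageHeap::DecommitSpan(Span* span) {
      TCMalloc_SystemRelease(
          reinterpret_cast<void*>(span->start << kPageShift),
          static_cast<size_t>(span->length) << kPageShift);
      stats_.committed_bytes -= static_cast<uint64_t>(span->length) << kPageShift;
    }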
266 // Prepends span to appropriate free list, and adjusts stats. | 283 // Prepends span to appropriate free list, and adjusts stats. |
267 void PrependToFreeList(Span* span); | 284 void PrependToFreeList(Span* span); |
268 | 285 |
269 // Removes span from its free list, and adjust stats. | 286 // Removes span from its free list, and adjust stats. |
270 void RemoveFromFreeList(Span* span); | 287 void RemoveFromFreeList(Span* span); |
271 | 288 |
272 // Incrementally release some memory to the system. | 289 // Incrementally release some memory to the system. |
273 // IncrementalScavenge(n) is called whenever n pages are freed. | 290 // IncrementalScavenge(n) is called whenever n pages are freed. |
274 void IncrementalScavenge(Length n); | 291 void IncrementalScavenge(Length n); |
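
This is where scavenge_counter_, release_index_, and the two delay constants meet: every free ticks the counter down by the number of pages freed, and once it dips below zero one span is pushed back to the OS and the counter is re-armed. A condensed sketch (assuming a FLAGS_tcmalloc_release_rate knob and a DLL_IsEmpty list helper, both named as in upstream tcmalloc):

    void PageHeap::IncrementalScavenge(Length n) {
      scavenge_counter_ -= n;
      if (scavenge_counter_ >= 0) return;  // not yet time to scavenge

      // Round-robin over the free lists, starting just past the last
      // list we released from.
      int index = release_index_ + 1;
      for (int i = 0; i < kMaxPages + 1; i++, index++) {
        if (index > kMaxPages) index = 0;
        SpanList* slist = (index == kMaxPages) ? &large_ : &free_[index];
        if (!DLL_IsEmpty(&slist->normal)) {
          Length released = ReleaseLastNormalSpan(slist);
          // Wait proportionally to the amount released, capped at the max.
          double wait = 1000.0 / FLAGS_tcmalloc_release_rate * released;
          if (wait > kMaxReleaseDelay) wait = kMaxReleaseDelay;
          scavenge_counter_ = static_cast<int64_t>(wait);
          release_index_ = index;
          return;
        }
      }
      scavenge_counter_ = kDefaultReleaseDelay;  // nothing to release
    }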
275 | 292 |
276 // Release the last span on the normal portion of this list. | 293 // Release the last span on the normal portion of this list. |
277 // Return the length of that span. | 294 // Return the length of that span. |
278 Length ReleaseLastNormalSpan(SpanList* slist); | 295 Length ReleaseLastNormalSpan(SpanList* slist); |
279 | 296 |
280 | 297 |
281 // Number of pages to deallocate before doing more scavenging | 298 // Number of pages to deallocate before doing more scavenging |
282 int64_t scavenge_counter_; | 299 int64_t scavenge_counter_; |
283 | 300 |
284 // Index of last free list where we released memory to the OS. | 301 // Index of last free list where we released memory to the OS. |
285 int release_index_; | 302 int release_index_; |
286 }; | 303 }; |
287 | 304 |
288 } // namespace tcmalloc | 305 } // namespace tcmalloc |
289 | 306 |
290 #ifdef _MSC_VER | 307 #ifdef _MSC_VER |
291 #pragma warning(pop) | 308 #pragma warning(pop) |
292 #endif | 309 #endif |
293 | 310 |
294 #endif // TCMALLOC_PAGE_HEAP_H_ | 311 #endif // TCMALLOC_PAGE_HEAP_H_ |