OLD | NEW |
1 // Copyright (c) 2008, Google Inc. | 1 // Copyright (c) 2008, Google Inc. |
2 // All rights reserved. | 2 // All rights reserved. |
3 // | 3 // |
4 // Redistribution and use in source and binary forms, with or without | 4 // Redistribution and use in source and binary forms, with or without |
5 // modification, are permitted provided that the following conditions are | 5 // modification, are permitted provided that the following conditions are |
6 // met: | 6 // met: |
7 // | 7 // |
8 // * Redistributions of source code must retain the above copyright | 8 // * Redistributions of source code must retain the above copyright |
9 // notice, this list of conditions and the following disclaimer. | 9 // notice, this list of conditions and the following disclaimer. |
10 // * Redistributions in binary form must reproduce the above | 10 // * Redistributions in binary form must reproduce the above |
(...skipping 58 matching lines...)
69 struct MallocRange; | 69 struct MallocRange; |
70 } | 70 } |
71 | 71 |
72 namespace tcmalloc { | 72 namespace tcmalloc { |
73 | 73 |
74 // ------------------------------------------------------------------------- | 74 // ------------------------------------------------------------------------- |
75 // Map from page-id to per-page data | 75 // Map from page-id to per-page data |
76 // ------------------------------------------------------------------------- | 76 // ------------------------------------------------------------------------- |
77 | 77 |
78 // We use PageMap2<> for 32-bit and PageMap3<> for 64-bit machines. | 78 // We use PageMap2<> for 32-bit and PageMap3<> for 64-bit machines. |
| 79 // ...except... |
| 80 // On Windows, we use TCMalloc_PageMap1_LazyCommit<> for 32-bit machines. |
79 // We also use a simple one-level cache for hot PageID-to-sizeclass mappings, | 81 // We also use a simple one-level cache for hot PageID-to-sizeclass mappings, |
80 // because sometimes the sizeclass is all the information we need. | 82 // because sometimes the sizeclass is all the information we need. |
81 | 83 |
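The one-level cache mentioned above can be pictured as a direct-mapped table keyed by PageID. A minimal sketch of the idea using a hypothetical simplified class; the real type is the PackedCache<> chosen below, which packs key and value together:

    #include <stddef.h>
    #include <stdint.h>

    // Hypothetical illustration only -- not the real PackedCache.
    class ExampleSizeClassCache {
     public:
      ExampleSizeClassCache() {
        for (size_t i = 0; i < kEntries; i++) keys_[i] = ~static_cast<uintptr_t>(0);
      }
      void Put(uintptr_t page_id, uint8_t size_class) {
        const size_t slot = page_id & (kEntries - 1);
        keys_[slot] = page_id;       // direct-mapped: a new entry evicts the old one
        values_[slot] = size_class;
      }
      bool TryGet(uintptr_t page_id, uint8_t* out) const {
        const size_t slot = page_id & (kEntries - 1);
        if (keys_[slot] != page_id) return false;  // miss: fall back to the pagemap
        *out = values_[slot];
        return true;
      }
     private:
      static const size_t kEntries = 1 << 16;  // assumed cache size
      uintptr_t keys_[kEntries];
      uint8_t values_[kEntries];
    };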
82 // Selector class -- general selector uses 3-level map | 84 // Selector class -- general selector uses 3-level map |
83 template <int BITS> class MapSelector { | 85 template <int BITS> class MapSelector { |
84 public: | 86 public: |
85 typedef TCMalloc_PageMap3<BITS-kPageShift> Type; | 87 typedef TCMalloc_PageMap3<BITS-kPageShift> Type; |
86 typedef PackedCache<BITS-kPageShift, uint64_t> CacheType; | 88 typedef PackedCache<BITS-kPageShift, uint64_t> CacheType; |
87 }; | 89 }; |
88 | 90 |
89 // A two-level map for 32-bit machines | 91 // A two-level map for 32-bit machines |
90 template <> class MapSelector<32> { | 92 template <> class MapSelector<32> { |
91 public: | 93 public: |
| 94 #ifdef WIN32 |
| 95 // A flat map for 32-bit machines (with lazy commit of memory). |
| 96 typedef TCMalloc_PageMap1_LazyCommit<32-kPageShift> Type; |
| 97 #else |
| 98 // A two-level map for 32-bit machines |
92 typedef TCMalloc_PageMap2<32-kPageShift> Type; | 99 typedef TCMalloc_PageMap2<32-kPageShift> Type; |
| 100 #endif |
93 typedef PackedCache<32-kPageShift, uint16_t> CacheType; | 101 typedef PackedCache<32-kPageShift, uint16_t> CacheType; |
94 }; | 102 }; |
95 | 103 |
96 // ------------------------------------------------------------------------- | 104 // ------------------------------------------------------------------------- |
97 // Page-level allocator | 105 // Page-level allocator |
98 // * Eager coalescing | 106 // * Eager coalescing |
99 // | 107 // |
100 // Heap for page-level allocation. We allow allocating and freeing | 108 // Heap for page-level allocation. We allow allocating and freeing |
101 // contiguous runs of pages (called a "span"). | 109 // contiguous runs of pages (called a "span"). |
102 // ------------------------------------------------------------------------- | 110 // ------------------------------------------------------------------------- |
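A usage sketch of the span interface this comment describes. New() and Delete() are declared in the elided portion of the class below; their exact signatures here are assumptions based on the comment:

    // Hedged sketch: assumes Span* PageHeap::New(Length n) and
    // void PageHeap::Delete(Span* span).
    void ExampleSpanRoundTrip(tcmalloc::PageHeap* heap) {
      tcmalloc::Span* span = heap->New(5);  // request a run of 5 contiguous pages
      if (span != NULL) {
        // ... hand the span's pages to the caller ...
        heap->Delete(span);  // free; eager coalescing merges adjacent free spans
      }
    }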
(...skipping 40 matching lines...)
143 // If this page heap is managing a range with starting page # >= start, | 151 // If this page heap is managing a range with starting page # >= start, |
144 // store info about the range in *r and return true. Else return false. | 152 // store info about the range in *r and return true. Else return false. |
145 bool GetNextRange(PageID start, base::MallocRange* r); | 153 bool GetNextRange(PageID start, base::MallocRange* r); |
146 | 154 |
147 // Page heap statistics | 155 // Page heap statistics |
148 struct Stats { | 156 struct Stats { |
149 Stats() : system_bytes(0), free_bytes(0), unmapped_bytes(0) {} | 157 Stats() : system_bytes(0), free_bytes(0), unmapped_bytes(0), committed_bytes(0) {} |
150 uint64_t system_bytes; // Total bytes allocated from system | 158 uint64_t system_bytes; // Total bytes allocated from system |
151 uint64_t free_bytes; // Total bytes on normal freelists | 159 uint64_t free_bytes; // Total bytes on normal freelists |
152 uint64_t unmapped_bytes; // Total bytes on returned freelists | 160 uint64_t unmapped_bytes; // Total bytes on returned freelists |
| 161 uint64_t committed_bytes; // Bytes committed, always <= system_bytes. |
| 162 |
153 }; | 163 }; |
154 inline Stats stats() const { return stats_; } | 164 inline Stats stats() const { return stats_; } |
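The counters above relate in a simple way. As a sketch (assuming the enclosing class is tcmalloc::PageHeap, per the file name), the derived quantity callers usually want is:

    // In-use bytes = bytes obtained from the system minus bytes parked on the
    // normal and returned free lists. Illustration only; not part of the patch.
    uint64_t ExampleInUseBytes(const tcmalloc::PageHeap::Stats& s) {
      return s.system_bytes - s.free_bytes - s.unmapped_bytes;
    }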
155 void GetClassSizes(int64 class_sizes_normal[kMaxPages], | 165 void GetClassSizes(int64 class_sizes_normal[kMaxPages], |
156 int64 class_sizes_returned[kMaxPages], | 166 int64 class_sizes_returned[kMaxPages], |
157 int64* normal_pages_in_spans, | 167 int64* normal_pages_in_spans, |
158 int64* returned_pages_in_spans); | 168 int64* returned_pages_in_spans); |
159 | 169 |
160 bool Check(); | 170 bool Check(); |
161 // Like Check() but does some more comprehensive checking. | 171 // Like Check() but does some more comprehensive checking. |
162 bool CheckExpensive(); | 172 bool CheckExpensive(); |
(...skipping 28 matching lines...)
191 // overhead, and also to reduce external fragmentation. Also, we | 201 // overhead, and also to reduce external fragmentation. Also, we |
192 // should keep this value big because various incarnations of Linux | 202 // should keep this value big because various incarnations of Linux |
193 // have small limits on the number of mmap() regions per | 203 // have small limits on the number of mmap() regions per |
194 // address-space. | 204 // address-space. |
195 // REQUIRED: kMinSystemAlloc <= kMaxPages; | 205 // REQUIRED: kMinSystemAlloc <= kMaxPages; |
196 static const int kMinSystemAlloc = kMaxPages; | 206 static const int kMinSystemAlloc = kMaxPages; |
197 | 207 |
198 // Never delay scavenging for more than the following number of | 208 // Never delay scavenging for more than the following number of |
199 // deallocated pages. With 4K pages, this comes to 4GB of | 209 // deallocated pages. With 4K pages, this comes to 4GB of |
200 // deallocation. | 210 // deallocation. |
201 static const int kMaxReleaseDelay = 1 << 20; | 211 // Chrome: Changed to 64MB |
| 212 static const int kMaxReleaseDelay = 1 << 14; |
202 | 213 |
203 // If there is nothing to release, wait for so many pages before | 214 // If there is nothing to release, wait for so many pages before |
204 // scavenging again. With 4K pages, this comes to 1GB of memory. | 215 // scavenging again. With 4K pages, this comes to 1GB of memory. |
205 static const int kDefaultReleaseDelay = 1 << 18; | 216 // Chrome: Changed to 16MB |
| 217 static const int kDefaultReleaseDelay = 1 << 12; |
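A quick arithmetic check of the two Chrome values, assuming the 4K page size the surrounding comments use:

    // kMaxReleaseDelay:     (1 << 14) pages * 4096 bytes/page = 67108864 bytes = 64MB
    // kDefaultReleaseDelay: (1 << 12) pages * 4096 bytes/page = 16777216 bytes = 16MB
    // Upstream's values were 1 << 20 (4GB) and 1 << 18 (1GB) respectively.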
206 | 218 |
207 // Pick the appropriate map and cache types based on pointer size | 219 // Pick the appropriate map and cache types based on pointer size |
208 typedef MapSelector<kAddressBits>::Type PageMap; | 220 typedef MapSelector<kAddressBits>::Type PageMap; |
209 typedef MapSelector<kAddressBits>::CacheType PageMapCache; | 221 typedef MapSelector<kAddressBits>::CacheType PageMapCache; |
210 PageMap pagemap_; | 222 PageMap pagemap_; |
211 mutable PageMapCache pagemap_cache_; | 223 mutable PageMapCache pagemap_cache_; |
212 | 224 |
213 // We segregate spans of a given size into two circular linked | 225 // We segregate spans of a given size into two circular linked |
214 // lists: one for normal spans, and one for spans whose memory | 226 // lists: one for normal spans, and one for spans whose memory |
215 // has been returned to the system. | 227 // has been returned to the system. |
216 struct SpanList { | 228 struct SpanList { |
217 Span normal; | 229 Span normal; |
218 Span returned; | 230 Span returned; |
219 }; | 231 }; |
220 | 232 |
221 // List of free spans of length >= kMaxPages | 233 // List of free spans of length >= kMaxPages |
222 SpanList large_; | 234 SpanList large_; |
223 | 235 |
224 // Array mapping from span length to a doubly linked list of free spans | 236 // Array mapping from span length to a doubly linked list of free spans |
225 SpanList free_[kMaxPages]; | 237 SpanList free_[kMaxPages]; |
226 | 238 |
227 // Statistics on system, free, and unmapped bytes | 239 // Statistics on system, free, and unmapped bytes |
228 Stats stats_; | 240 Stats stats_; |
229 | |
230 Span* SearchFreeAndLargeLists(Length n); | 241 Span* SearchFreeAndLargeLists(Length n); |
231 | 242 |
232 bool GrowHeap(Length n); | 243 bool GrowHeap(Length n); |
233 | 244 |
234 // REQUIRES: span->length >= n | 245 // REQUIRES: span->length >= n |
235 // REQUIRES: span->location != IN_USE | 246 // REQUIRES: span->location != IN_USE |
236 // Remove span from its free list, and move any leftover part of | 247 // Remove span from its free list, and move any leftover part of |
237 // span into appropriate free lists. Also update "span" to have | 248 // span into appropriate free lists. Also update "span" to have |
238 // length exactly "n" and mark it as non-free so it can be returned | 249 // length exactly "n" and mark it as non-free so it can be returned |
239 // to the client. After all that, decrease free_pages_ by n and | 250 // to the client. After all that, decrease free_pages_ by n and |
240 // return span. | 251 // return span. |
241 Span* Carve(Span* span, Length n); | 252 Span* Carve(Span* span, Length n); |
242 | 253 |
243 void RecordSpan(Span* span) { | 254 void RecordSpan(Span* span) { |
244 pagemap_.set(span->start, span); | 255 pagemap_.set(span->start, span); |
245 if (span->length > 1) { | 256 if (span->length > 1) { |
246 pagemap_.set(span->start + span->length - 1, span); | 257 pagemap_.set(span->start + span->length - 1, span); |
247 } | 258 } |
248 } | 259 } |
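RecordSpan() indexes only a span's first and last pages. That is enough because coalescing only ever probes the pages immediately adjacent to a span, and those are always some neighbor's last or first page. A hedged sketch of that probe, assuming pagemap_.get() returns the stored void* as in the TCMalloc_PageMap classes:

    // Illustration of the neighbor lookup MergeIntoFreeList() relies on.
    const PageID p = span->start;
    const Length n = span->length;
    Span* prev = reinterpret_cast<Span*>(pagemap_.get(p - 1));  // left neighbor's last page
    Span* next = reinterpret_cast<Span*>(pagemap_.get(p + n));  // right neighbor's first page
    // A non-NULL neighbor whose location is not IN_USE can be merged with span.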
249 | 260 |
250 // Allocate a large span of length == n. If successful, returns a | 261 // Allocate a large span of length == n. If successful, returns a |
251 // span of exactly the specified length. Else, returns NULL. | 262 // span of exactly the specified length. Else, returns NULL. |
252 Span* AllocLarge(Length n); | 263 Span* AllocLarge(Length n); |
253 | 264 |
254 // Coalesce span with neighboring spans if possible, prepend to | 265 // Coalesce span with neighboring spans if possible, prepend to |
255 // appropriate free list, and adjust stats. | 266 // appropriate free list, and adjust stats. |
256 void MergeIntoFreeList(Span* span); | 267 void MergeIntoFreeList(Span* span); |
257 | 268 |
| 269 // Commit the span. |
| 270 void CommitSpan(Span* span); |
| 271 |
| 272 // Decommit the span. |
| 273 void DecommitSpan(Span* span); |
| 274 |
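Given the lazy-commit support this patch adds, these hooks presumably translate the span into a byte range, call into the system allocator, and keep the new committed_bytes counter current. A sketch of the decommit side, assuming the existing TCMalloc_SystemRelease() entry point from system-alloc.h; the bookkeeping line is an assumption based on the Stats field added above:

    void PageHeap::DecommitSpan(Span* span) {
      TCMalloc_SystemRelease(reinterpret_cast<void*>(span->start << kPageShift),
                             static_cast<size_t>(span->length << kPageShift));
      stats_.committed_bytes -= span->length << kPageShift;
    }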
258 // Prepends span to appropriate free list, and adjusts stats. | 275 // Prepends span to appropriate free list, and adjusts stats. |
259 void PrependToFreeList(Span* span); | 276 void PrependToFreeList(Span* span); |
260 | 277 |
261 // Removes span from its free list, and adjust stats. | 278 // Removes span from its free list, and adjust stats. |
262 void RemoveFromFreeList(Span* span); | 279 void RemoveFromFreeList(Span* span); |
263 | 280 |
264 // Incrementally release some memory to the system. | 281 // Incrementally release some memory to the system. |
265 // IncrementalScavenge(n) is called whenever n pages are freed. | 282 // IncrementalScavenge(n) is called whenever n pages are freed. |
266 void IncrementalScavenge(Length n); | 283 void IncrementalScavenge(Length n); |
267 | 284 |
268 // Release the last span on the normal portion of this list. | 285 // Release the last span on the normal portion of this list. |
269 // Return the length of that span. | 286 // Return the length of that span. |
270 Length ReleaseLastNormalSpan(SpanList* slist); | 287 Length ReleaseLastNormalSpan(SpanList* slist); |
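A hedged sketch of the control flow IncrementalScavenge() implies, tying together the delay constants above and the two counters declared just below; the real body lives in page_heap.cc:

    void PageHeap::IncrementalScavenge(Length n) {
      scavenge_counter_ -= n;
      if (scavenge_counter_ >= 0) return;  // not enough freed pages yet
      // Otherwise: walk free_[] and large_ round-robin starting at
      // release_index_, release one span via ReleaseLastNormalSpan(), and
      // reset scavenge_counter_ (kDefaultReleaseDelay if nothing could be
      // released, otherwise a delay capped by kMaxReleaseDelay).
    }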
271 | 288 |
272 | 289 |
273 // Number of pages to deallocate before doing more scavenging | 290 // Number of pages to deallocate before doing more scavenging |
274 int64_t scavenge_counter_; | 291 int64_t scavenge_counter_; |
275 | 292 |
276 // Index of last free list where we released memory to the OS. | 293 // Index of last free list where we released memory to the OS. |
277 int release_index_; | 294 int release_index_; |
278 }; | 295 }; |
279 | 296 |
280 } // namespace tcmalloc | 297 } // namespace tcmalloc |
281 | 298 |
282 #ifdef _MSC_VER | 299 #ifdef _MSC_VER |
283 #pragma warning(pop) | 300 #pragma warning(pop) |
284 #endif | 301 #endif |
285 | 302 |
286 #endif // TCMALLOC_PAGE_HEAP_H_ | 303 #endif // TCMALLOC_PAGE_HEAP_H_ |