OLD | NEW |
1 // Copyright (c) 2008, Google Inc. | 1 // Copyright (c) 2008, Google Inc. |
2 // All rights reserved. | 2 // All rights reserved. |
3 // | 3 // |
4 // Redistribution and use in source and binary forms, with or without | 4 // Redistribution and use in source and binary forms, with or without |
5 // modification, are permitted provided that the following conditions are | 5 // modification, are permitted provided that the following conditions are |
6 // met: | 6 // met: |
7 // | 7 // |
8 // * Redistributions of source code must retain the above copyright | 8 // * Redistributions of source code must retain the above copyright |
9 // notice, this list of conditions and the following disclaimer. | 9 // notice, this list of conditions and the following disclaimer. |
10 // * Redistributions in binary form must reproduce the above | 10 // * Redistributions in binary form must reproduce the above |
(...skipping 33 matching lines...)
44 "to return memory slower. Reasonable rates are in the " | 44 "to return memory slower. Reasonable rates are in the " |
45 "range [0,10]"); | 45 "range [0,10]"); |
46 | 46 |
47 namespace tcmalloc { | 47 namespace tcmalloc { |
48 | 48 |
49 PageHeap::PageHeap() | 49 PageHeap::PageHeap() |
50 : pagemap_(MetaDataAlloc), | 50 : pagemap_(MetaDataAlloc), |
51 pagemap_cache_(0), | 51 pagemap_cache_(0), |
52 free_pages_(0), | 52 free_pages_(0), |
53 system_bytes_(0), | 53 system_bytes_(0), |
| 54 #if DEFER_DECOMMIT |
| 55 free_committed_pages_(0), |
| 56 pages_committed_since_last_scavenge_(0), |
| 57 #else |
54 scavenge_counter_(0), | 58 scavenge_counter_(0), |
| 59 #endif |
55 // Start scavenging at kMaxPages list | 60 // Start scavenging at kMaxPages list |
56 scavenge_index_(kMaxPages-1) { | 61 scavenge_index_(kMaxPages-1) { |
57 COMPILE_ASSERT(kNumClasses <= (1 << PageMapCache::kValuebits), valuebits); | 62 COMPILE_ASSERT(kNumClasses <= (1 << PageMapCache::kValuebits), valuebits); |
58 DLL_Init(&large_.normal); | 63 DLL_Init(&large_.normal); |
59 DLL_Init(&large_.returned); | 64 DLL_Init(&large_.returned); |
60 for (int i = 0; i < kMaxPages; i++) { | 65 for (int i = 0; i < kMaxPages; i++) { |
61 DLL_Init(&free_[i].normal); | 66 DLL_Init(&free_[i].normal); |
62 DLL_Init(&free_[i].returned); | 67 DLL_Init(&free_[i].returned); |
63 } | 68 } |
64 } | 69 } |
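The initializer list above implies two new PageHeap members under DEFER_DECOMMIT. Their declarations live in page_heap.h, which is not part of this diff, so the following sketch is an assumption (including the uint64_t type, inferred from the std::min call in Scavenge() below):

    #if DEFER_DECOMMIT
      // Pages sitting on the "normal" free lists: freed, still
      // committed, and cheap to hand back out.
      uint64_t free_committed_pages_;
      // Pages committed since the last Scavenge(); nonzero signals
      // recent allocation pressure, so the next scavenge pass bails.
      uint64_t pages_committed_since_last_scavenge_;
    #endif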
(...skipping 78 matching lines...)
143 Span* leftover = NewSpan(span->start + n, extra); | 148 Span* leftover = NewSpan(span->start + n, extra); |
144 ASSERT(leftover->location == Span::IN_USE); | 149 ASSERT(leftover->location == Span::IN_USE); |
145 Event(leftover, 'U', extra); | 150 Event(leftover, 'U', extra); |
146 RecordSpan(leftover); | 151 RecordSpan(leftover); |
147 pagemap_.set(span->start + n - 1, span); // Update map from pageid to span | 152 pagemap_.set(span->start + n - 1, span); // Update map from pageid to span |
148 span->length = n; | 153 span->length = n; |
149 | 154 |
150 return leftover; | 155 return leftover; |
151 } | 156 } |
152 | 157 |
| 158 void PageHeap::CommitSpan(Span* span) { |
| 159 TCMalloc_SystemCommit( |
| 160 reinterpret_cast<void*>(span->start << kPageShift), |
| 161 static_cast<size_t>(span->length << kPageShift) |
| 162 ); |
| 163 #if DEFER_DECOMMIT |
| 164 pages_committed_since_last_scavenge_ += span->length; |
| 165 #endif |
| 166 } |
| 167 |
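CommitSpan() is the commit-side counterpart of TCMalloc_SystemRelease. The system layer lives in the platform-specific port rather than in this file; as a rough sketch, assuming the Windows port, it presumably reduces to something like:

    #include <windows.h>

    // Sketch under assumptions, not the actual port code: recommit
    // address space that an earlier TCMalloc_SystemRelease decommitted.
    void TCMalloc_SystemCommit(void* start, size_t length) {
      VirtualAlloc(start, length, MEM_COMMIT, PAGE_READWRITE);
    }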
153 Span* PageHeap::Carve(Span* span, Length n) { | 168 Span* PageHeap::Carve(Span* span, Length n) { |
154 ASSERT(n > 0); | 169 ASSERT(n > 0); |
155 ASSERT(span->location != Span::IN_USE); | 170 ASSERT(span->location != Span::IN_USE); |
156 const int old_location = span->location; | 171 const int old_location = span->location; |
157 DLL_Remove(span); | 172 DLL_Remove(span); |
158 span->location = Span::IN_USE; | 173 span->location = Span::IN_USE; |
159 Event(span, 'A', n); | 174 Event(span, 'A', n); |
160 | 175 |
161 const int extra = span->length - n; | 176 const int extra = span->length - n; |
162 ASSERT(extra >= 0); | 177 ASSERT(extra >= 0); |
163 if (extra > 0) { | 178 if (extra > 0) { |
164 Span* leftover = NewSpan(span->start + n, extra); | 179 Span* leftover = NewSpan(span->start + n, extra); |
165 leftover->location = old_location; | 180 leftover->location = old_location; |
166 Event(leftover, 'S', extra); | 181 Event(leftover, 'S', extra); |
167 RecordSpan(leftover); | 182 RecordSpan(leftover); |
168 | 183 |
169 // Place leftover span on appropriate free list | 184 // Place leftover span on appropriate free list |
170 SpanList* listpair = (extra < kMaxPages) ? &free_[extra] : &large_; | 185 SpanList* listpair = (extra < kMaxPages) ? &free_[extra] : &large_; |
171 Span* dst = (leftover->location == Span::ON_RETURNED_FREELIST | 186 Span* dst = (leftover->location == Span::ON_RETURNED_FREELIST |
172 ? &listpair->returned : &listpair->normal); | 187 ? &listpair->returned : &listpair->normal); |
173 DLL_Prepend(dst, leftover); | 188 DLL_Prepend(dst, leftover); |
174 | 189 |
175 span->length = n; | 190 span->length = n; |
176 pagemap_.set(span->start + n - 1, span); | 191 pagemap_.set(span->start + n - 1, span); |
177 } | 192 } |
| 193 if (old_location == Span::ON_RETURNED_FREELIST) { |
| 194 // We need to recommit this address space. |
| 195 CommitSpan(span); |
| 196 } else { |
| 197 #if DEFER_DECOMMIT |
| 198 // The newly allocated memory is from a span that's already committed. |
| 199 // Update the free_committed_pages_ count. |
| 200 ASSERT(free_committed_pages_ >= n); |
| 201 free_committed_pages_ -= n; |
| 202 #endif |
| 203 } |
| 204 ASSERT(span->location == Span::IN_USE); |
| 205 ASSERT(span->length == n); |
178 ASSERT(Check()); | 206 ASSERT(Check()); |
179 free_pages_ -= n; | 207 free_pages_ -= n; |
180 if (old_location == Span::ON_RETURNED_FREELIST) { | |
181 // We need to recommit this address space. | |
182 TCMalloc_SystemCommit( | |
183 reinterpret_cast<void*>(span->start << kPageShift), | |
184 static_cast<size_t>(span->length << kPageShift)); | |
185 } | |
186 return span; | 208 return span; |
187 } | 209 } |
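A worked example of the new commit accounting in Carve(), with hypothetical sizes: carving n = 3 pages from a 10-page span taken off free_[10].returned.

    // - the 7-page leftover inherits old_location and goes back onto
    //   free_[7].returned, still decommitted;
    // - the carved 3-page span is recommitted via CommitSpan(), which
    //   under DEFER_DECOMMIT also adds 3 to
    //   pages_committed_since_last_scavenge_;
    // - free_pages_ -= 3, while free_committed_pages_ is untouched,
    //   since the pages taken were never on a committed ("normal") list.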
188 | 210 |
189 void PageHeap::Delete(Span* span) { | 211 void PageHeap::Delete(Span* span) { |
190 ASSERT(Check()); | 212 ASSERT(Check()); |
191 ASSERT(span->location == Span::IN_USE); | 213 ASSERT(span->location == Span::IN_USE); |
192 ASSERT(span->length > 0); | 214 ASSERT(span->length > 0); |
193 ASSERT(GetDescriptor(span->start) == span); | 215 ASSERT(GetDescriptor(span->start) == span); |
194 ASSERT(GetDescriptor(span->start + span->length - 1) == span); | 216 ASSERT(GetDescriptor(span->start + span->length - 1) == span); |
195 span->sizeclass = 0; | 217 span->sizeclass = 0; |
196 span->sample = 0; | 218 span->sample = 0; |
197 | 219 |
198 // Coalesce -- we guarantee that "p" != 0, so no bounds checking | 220 // Coalesce -- we guarantee that "p" != 0, so no bounds checking |
199 // necessary. We do not bother resetting the stale pagemap | 221 // necessary. We do not bother resetting the stale pagemap |
200 // entries for the pieces we are merging together because we only | 222 // entries for the pieces we are merging together because we only |
201 // care about the pagemap entries for the boundaries. | 223 // care about the pagemap entries for the boundaries. |
202 // | 224 // |
203 // Note that the spans we merge into "span" may come out of | 225 // Note that the spans we merge into "span" may come out of |
204 // a "normal" list. For simplicity, we move these into the | 226 // a "returned" list. We move those into "normal" list |
205 // "returned" list of the appropriate size class. We do this | 227 // as for 'immediate' operations we favour committing over |
206 // so that we can maximize large, continuous blocks of freed | 228 // decommitting (decommitting is performed offline). |
207 // space. | |
208 const PageID p = span->start; | 229 const PageID p = span->start; |
209 const Length n = span->length; | 230 const Length n = span->length; |
210 Span* prev = GetDescriptor(p-1); | 231 Span* prev = GetDescriptor(p-1); |
211 if (prev != NULL && prev->location != Span::IN_USE) { | 232 if (prev != NULL && prev->location != Span::IN_USE) { |
212 // Merge preceding span into this span | 233 // Merge preceding span into this span |
213 ASSERT(prev->start + prev->length == p); | 234 ASSERT(prev->start + prev->length == p); |
214 const Length len = prev->length; | 235 const Length len = prev->length; |
| 236 if (prev->location == Span::ON_RETURNED_FREELIST) { |
| 237 CommitSpan(prev); |
| 238 #if DEFER_DECOMMIT |
| 239 free_committed_pages_ += len; |
| 240 #endif |
| 241 } |
215 DLL_Remove(prev); | 242 DLL_Remove(prev); |
216 DeleteSpan(prev); | 243 DeleteSpan(prev); |
217 span->start -= len; | 244 span->start -= len; |
218 span->length += len; | 245 span->length += len; |
219 pagemap_.set(span->start, span); | 246 pagemap_.set(span->start, span); |
220 Event(span, 'L', len); | 247 Event(span, 'L', len); |
221 } | 248 } |
222 Span* next = GetDescriptor(p+n); | 249 Span* next = GetDescriptor(p+n); |
223 if (next != NULL && next->location != Span::IN_USE) { | 250 if (next != NULL && next->location != Span::IN_USE) { |
224 // Merge next span into this span | 251 // Merge next span into this span |
225 ASSERT(next->start == p+n); | 252 ASSERT(next->start == p+n); |
226 const Length len = next->length; | 253 const Length len = next->length; |
| 254 if (next->location == Span::ON_RETURNED_FREELIST) { |
| 255 CommitSpan(next); |
| 256 #if DEFER_DECOMMIT |
| 257 free_committed_pages_ += len; |
| 258 #endif |
| 259 } |
227 DLL_Remove(next); | 260 DLL_Remove(next); |
228 DeleteSpan(next); | 261 DeleteSpan(next); |
229 span->length += len; | 262 span->length += len; |
230 pagemap_.set(span->start + span->length - 1, span); | 263 pagemap_.set(span->start + span->length - 1, span); |
231 Event(span, 'R', len); | 264 Event(span, 'R', len); |
232 } | 265 } |
233 | 266 |
234 Event(span, 'D', span->length); | 267 Event(span, 'D', span->length); |
235 span->location = Span::ON_RETURNED_FREELIST; | 268 span->location = Span::ON_NORMAL_FREELIST; |
236 TCMalloc_SystemRelease(reinterpret_cast<void*>(span->start << kPageShift), | |
237 static_cast<size_t>(span->length << kPageShift)); | |
238 if (span->length < kMaxPages) | 269 if (span->length < kMaxPages) |
239 DLL_Prepend(&free_[span->length].returned, span); | 270 DLL_Prepend(&free_[span->length].normal, span); |
240 else | 271 else |
241 DLL_Prepend(&large_.returned, span); | 272 DLL_Prepend(&large_.normal, span); |
242 free_pages_ += n; | 273 free_pages_ += n; |
| 274 #if DEFER_DECOMMIT |
| 275 free_committed_pages_ += n; |
| 276 #endif |
243 | 277 |
| 278 #if DEFER_DECOMMIT |
 | 279 // TODO(antonm): notify that we could start scavenging |
| 280 #else |
244 IncrementalScavenge(n); | 281 IncrementalScavenge(n); |
| 282 #endif |
245 ASSERT(Check()); | 283 ASSERT(Check()); |
246 } | 284 } |
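Because Delete() now always parks the coalesced span on a "normal" list, any decommitted neighbor must be recommitted during the merge. A hypothetical walk-through: freeing a 2-page span whose left neighbor is a 5-page span on a returned list.

    // - CommitSpan(prev) recommits the neighbor's 5 pages and, under
    //   DEFER_DECOMMIT, free_committed_pages_ += 5;
    // - the merged 7-page span lands on free_[7].normal;
    // - free_pages_ += 2 and free_committed_pages_ += 2 account for the
    //   newly freed pages themselves, so all 7 pages end up counted.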
247 | 285 |
| 286 |
| 287 void PageHeap::Scavenge() { |
| 288 #if DEFER_DECOMMIT |
 | 289 // If we had to commit memory since the last scavenge, it means we don't |
 | 290 // have enough free committed pages of the necessary sizes for the |
 | 291 // allocations we are doing, so hold off on releasing memory back to the system. |
| 292 if (pages_committed_since_last_scavenge_ > 0) { |
| 293 pages_committed_since_last_scavenge_ = 0; |
| 294 return; |
| 295 } |
| 296 |
| 297 if (free_committed_pages_ <= kMinimumFreeCommittedPageCount) { |
| 298 return; |
| 299 } |
| 300 |
| 301 uint64_t to_decommit = std::min( |
| 302 free_committed_pages_ - kMinimumFreeCommittedPageCount, |
| 303 free_committed_pages_ / kMaxScavengeAmountFactor); |
| 304 to_decommit = DecommitFromSpanList(&large_, to_decommit); |
| 305 for (int i = kMaxPages - 1; i >= 0; i--) { |
| 306 to_decommit = DecommitFromSpanList(&free_[i], to_decommit); |
| 307 } |
| 308 |
 | 309 // Force at least one decommit from the large list; otherwise big blocks |
 | 310 // sitting there could keep us from releasing the smaller blocks behind them. |
| 311 if (to_decommit > 0) { |
| 312 if (!DLL_IsEmpty(&large_.normal)) { |
| 313 DecommitLastSpan(&large_, large_.normal.prev); |
| 314 } |
| 315 } |
| 316 #endif |
| 317 } |
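The decommit budget bounds each pass by both a floor and a fraction of the committed free pool. With illustrative values for the two constants (their real definitions are elsewhere in the patch):

    // Assume kMinimumFreeCommittedPageCount = 512 and
    // kMaxScavengeAmountFactor = 4 (hypothetical values).
    // With free_committed_pages_ = 4096:
    //   to_decommit = min(4096 - 512, 4096 / 4)
    //               = min(3584, 1024) = 1024 pages,
    // so a single pass never decommits more than a quarter of the
    // committed free pages and never digs below the floor.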
| 318 |
| 319 #if DEFER_DECOMMIT |
| 320 Length PageHeap::DecommitLastSpan(SpanList* span_list, Span* span) { |
| 321 ASSERT(!DLL_IsEmpty(&span_list->normal)); |
| 322 ASSERT(span_list->normal.prev == span); |
| 323 |
| 324 Length length = span->length; |
| 325 |
| 326 DLL_Remove(span); |
| 327 |
 | 328 TCMalloc_SystemRelease(reinterpret_cast<void*>(span->start << kPageShift), static_cast<size_t>(span->length << kPageShift)); |
| 329 span->location = Span::ON_RETURNED_FREELIST; |
| 330 ASSERT(free_committed_pages_ >= length); |
| 331 free_committed_pages_ -= length; |
| 332 |
| 333 DLL_Prepend(&span_list->returned, span); |
| 334 |
| 335 return length; |
| 336 } |
| 337 |
 | 338 uint64_t PageHeap::DecommitFromSpanList(SpanList* span_list, uint64_t to_decommit) { |
| 339 while (!DLL_IsEmpty(&span_list->normal)) { |
| 340 // Release the last span on the normal portion of this list. |
| 341 Span* span = span_list->normal.prev; |
| 342 |
| 343 if (span->length > to_decommit) { |
| 344 return to_decommit; |
| 345 } |
| 346 |
| 347 to_decommit -= DecommitLastSpan(span_list, span); |
| 348 } |
| 349 |
| 350 return to_decommit; |
| 351 } |
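A reading of the loop above: spans come off the tail of the normal list (span_list->normal.prev), i.e. the least recently freed ones, which coalescing has had the longest to grow.

    // A span larger than the remaining budget ends the walk for this
    // list instead of being split; that is why Scavenge() forces one
    // oversized decommit from large_.normal when the budget returned
    // here is still unspent.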
| 352 |
| 353 #else |
| 354 |
248 void PageHeap::IncrementalScavenge(Length n) { | 355 void PageHeap::IncrementalScavenge(Length n) { |
249 // Fast path; not yet time to release memory | 356 // Fast path; not yet time to release memory |
250 scavenge_counter_ -= n; | 357 scavenge_counter_ -= n; |
251 if (scavenge_counter_ >= 0) return; // Not yet time to scavenge | 358 if (scavenge_counter_ >= 0) return; // Not yet time to scavenge |
252 | 359 |
253 // Never delay scavenging for more than the following number of | 360 // Never delay scavenging for more than the following number of |
254 // deallocated pages. With 4K pages, this comes to 4GB of | 361 // deallocated pages. With 4K pages, this comes to 4GB of |
255 // deallocation. | 362 // deallocation. |
256 // Chrome: Changed to 64MB | 363 // Chrome: Changed to 64MB |
257 static const int kMaxReleaseDelay = 1 << 14; | 364 static const int kMaxReleaseDelay = 1 << 14; |
(...skipping 39 matching lines...)
297 scavenge_index_ = index; // Scavenge at index+1 next time | 404 scavenge_index_ = index; // Scavenge at index+1 next time |
298 // Note: we stop scavenging after finding one. | 405 // Note: we stop scavenging after finding one. |
299 return; | 406 return; |
300 } | 407 } |
301 index++; | 408 index++; |
302 } | 409 } |
303 | 410 |
304 // Nothing to scavenge, delay for a while | 411 // Nothing to scavenge, delay for a while |
305 scavenge_counter_ = kDefaultReleaseDelay; | 412 scavenge_counter_ = kDefaultReleaseDelay; |
306 } | 413 } |
| 414 #endif |
307 | 415 |
308 void PageHeap::RegisterSizeClass(Span* span, size_t sc) { | 416 void PageHeap::RegisterSizeClass(Span* span, size_t sc) { |
309 // Associate span object with all interior pages as well | 417 // Associate span object with all interior pages as well |
310 ASSERT(span->location == Span::IN_USE); | 418 ASSERT(span->location == Span::IN_USE); |
311 ASSERT(GetDescriptor(span->start) == span); | 419 ASSERT(GetDescriptor(span->start) == span); |
312 ASSERT(GetDescriptor(span->start+span->length-1) == span); | 420 ASSERT(GetDescriptor(span->start+span->length-1) == span); |
313 Event(span, 'C', sc); | 421 Event(span, 'C', sc); |
314 span->sizeclass = sc; | 422 span->sizeclass = sc; |
315 for (Length i = 1; i < span->length-1; i++) { | 423 for (Length i = 1; i < span->length-1; i++) { |
316 pagemap_.set(span->start+i, span); | 424 pagemap_.set(span->start+i, span); |
(...skipping 81 matching lines...)
398 void* ptr = TCMalloc_SystemAlloc(ask << kPageShift, &actual_size, kPageSize); | 506 void* ptr = TCMalloc_SystemAlloc(ask << kPageShift, &actual_size, kPageSize); |
399 if (ptr == NULL) { | 507 if (ptr == NULL) { |
400 if (n < ask) { | 508 if (n < ask) { |
401 // Try growing just "n" pages | 509 // Try growing just "n" pages |
402 ask = n; | 510 ask = n; |
403 ptr = TCMalloc_SystemAlloc(ask << kPageShift, &actual_size, kPageSize); | 511 ptr = TCMalloc_SystemAlloc(ask << kPageShift, &actual_size, kPageSize); |
404 } | 512 } |
405 if (ptr == NULL) return false; | 513 if (ptr == NULL) return false; |
406 } | 514 } |
407 ask = actual_size >> kPageShift; | 515 ask = actual_size >> kPageShift; |
| 516 #if DEFER_DECOMMIT |
| 517 pages_committed_since_last_scavenge_ += ask; |
| 518 #endif |
408 RecordGrowth(ask << kPageShift); | 519 RecordGrowth(ask << kPageShift); |
409 | 520 |
410 uint64_t old_system_bytes = system_bytes_; | 521 uint64_t old_system_bytes = system_bytes_; |
411 system_bytes_ += (ask << kPageShift); | 522 system_bytes_ += (ask << kPageShift); |
412 const PageID p = reinterpret_cast<uintptr_t>(ptr) >> kPageShift; | 523 const PageID p = reinterpret_cast<uintptr_t>(ptr) >> kPageShift; |
413 ASSERT(p > 0); | 524 ASSERT(p > 0); |
414 | 525 |
415 // If we have already a lot of pages allocated, just pre allocate a bunch of | 526 // If we have already a lot of pages allocated, just pre allocate a bunch of |
416 // memory for the page map. This prevents fragmentation by pagemap metadata | 527 // memory for the page map. This prevents fragmentation by pagemap metadata |
417 // when a program keeps allocating and freeing large blocks. | 528 // when a program keeps allocating and freeing large blocks. |
(...skipping 65 matching lines...)
483 static_cast<size_t>(s->length << kPageShift)); | 594 static_cast<size_t>(s->length << kPageShift)); |
484 } | 595 } |
485 } | 596 } |
486 | 597 |
487 void PageHeap::ReleaseFreePages() { | 598 void PageHeap::ReleaseFreePages() { |
488 for (Length s = 0; s < kMaxPages; s++) { | 599 for (Length s = 0; s < kMaxPages; s++) { |
489 ReleaseFreeList(&free_[s].normal, &free_[s].returned); | 600 ReleaseFreeList(&free_[s].normal, &free_[s].returned); |
490 } | 601 } |
491 ReleaseFreeList(&large_.normal, &large_.returned); | 602 ReleaseFreeList(&large_.normal, &large_.returned); |
492 ASSERT(Check()); | 603 ASSERT(Check()); |
| 604 #if DEFER_DECOMMIT |
| 605 free_committed_pages_ = 0; |
| 606 #endif |
493 } | 607 } |
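After ReleaseFreeList() has presumably pushed every span onto the returned lists (its body is elided above), no freed page remains committed, which is why the DEFER_DECOMMIT counter is reset wholesale:

    // Post-condition sketch: every free span now sits on a .returned
    // list and is decommitted, so free_committed_pages_ == 0 by
    // definition; a per-span decrement would arrive at the same value.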
494 | 608 |
495 } // namespace tcmalloc | 609 } // namespace tcmalloc |