| OLD | NEW |
| 1 // Copyright (c) 2008, Google Inc. | 1 // Copyright (c) 2008, Google Inc. |
| 2 // All rights reserved. | 2 // All rights reserved. |
| 3 // | 3 // |
| 4 // Redistribution and use in source and binary forms, with or without | 4 // Redistribution and use in source and binary forms, with or without |
| 5 // modification, are permitted provided that the following conditions are | 5 // modification, are permitted provided that the following conditions are |
| 6 // met: | 6 // met: |
| 7 // | 7 // |
| 8 // * Redistributions of source code must retain the above copyright | 8 // * Redistributions of source code must retain the above copyright |
| 9 // notice, this list of conditions and the following disclaimer. | 9 // notice, this list of conditions and the following disclaimer. |
| 10 // * Redistributions in binary form must reproduce the above | 10 // * Redistributions in binary form must reproduce the above |
| (...skipping 33 matching lines...) |
| 44 "to return memory slower. Reasonable rates are in the " | 44 "to return memory slower. Reasonable rates are in the " |
| 45 "range [0,10]"); | 45 "range [0,10]"); |
| 46 | 46 |
| 47 namespace tcmalloc { | 47 namespace tcmalloc { |
| 48 | 48 |
| 49 PageHeap::PageHeap() | 49 PageHeap::PageHeap() |
| 50 : pagemap_(MetaDataAlloc), | 50 : pagemap_(MetaDataAlloc), |
| 51 pagemap_cache_(0), | 51 pagemap_cache_(0), |
| 52 free_pages_(0), | 52 free_pages_(0), |
| 53 system_bytes_(0), | 53 system_bytes_(0), |
| 54 #if DEFER_DECOMMIT | |
| 55 free_committed_pages_(0), | |
| 56 pages_committed_since_last_scavenge_(0), | |
| 57 #else | |
| 58 scavenge_counter_(0), | 54 scavenge_counter_(0), |
| 59 #endif | |
| 60 // Start scavenging at kMaxPages list | 55 // Start scavenging at kMaxPages list |
| 61 scavenge_index_(kMaxPages-1) { | 56 scavenge_index_(kMaxPages-1) { |
| 62 COMPILE_ASSERT(kNumClasses <= (1 << PageMapCache::kValuebits), valuebits); | 57 COMPILE_ASSERT(kNumClasses <= (1 << PageMapCache::kValuebits), valuebits); |
| 63 DLL_Init(&large_.normal); | 58 DLL_Init(&large_.normal); |
| 64 DLL_Init(&large_.returned); | 59 DLL_Init(&large_.returned); |
| 65 for (int i = 0; i < kMaxPages; i++) { | 60 for (int i = 0; i < kMaxPages; i++) { |
| 66 DLL_Init(&free_[i].normal); | 61 DLL_Init(&free_[i].normal); |
| 67 DLL_Init(&free_[i].returned); | 62 DLL_Init(&free_[i].returned); |
| 68 } | 63 } |
| 69 } | 64 } |
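DLL_Init, used above to seed each free list, sets up tcmalloc's sentinel-based circular doubly-linked list of Spans. A minimal sketch of the four list primitives this file leans on; field names follow the Span struct, and the bodies are illustrative rather than quoted from this patch:

    // Circular doubly-linked list; the list head is a sentinel Span.
    static void DLL_Init(Span* list) {
      list->next = list;  // empty list: the sentinel points at itself
      list->prev = list;
    }

    static bool DLL_IsEmpty(const Span* list) {
      return list->next == list;
    }

    static void DLL_Remove(Span* span) {
      span->prev->next = span->next;
      span->next->prev = span->prev;
      span->prev = NULL;  // pre-C++11 codebase, hence NULL over nullptr
      span->next = NULL;
    }

    static void DLL_Prepend(Span* list, Span* span) {
      span->next = list->next;  // insert at the head, so list->prev
      span->prev = list;        // is always the coldest span
      list->next->prev = span;
      list->next = span;
    }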
| (...skipping 83 matching lines...) |
| 153 span->length = n; | 148 span->length = n; |
| 154 | 149 |
| 155 return leftover; | 150 return leftover; |
| 156 } | 151 } |
| 157 | 152 |
| 158 void PageHeap::CommitSpan(Span* span) { | 153 void PageHeap::CommitSpan(Span* span) { |
| 159 TCMalloc_SystemCommit( | 154 TCMalloc_SystemCommit( |
| 160 reinterpret_cast<void*>(span->start << kPageShift), | 155 reinterpret_cast<void*>(span->start << kPageShift), |
| 161 static_cast<size_t>(span->length << kPageShift) | 156 static_cast<size_t>(span->length << kPageShift) |
| 162 ); | 157 ); |
| 163 #if DEFER_DECOMMIT | |
| 164 pages_committed_since_last_scavenge_ += span->length; | |
| 165 #endif | |
| 166 } | 158 } |
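CommitSpan re-commits a span's address range before it is reused; note the page-to-byte conversions via kPageShift on both arguments. On the Windows port this patch is aimed at, TCMalloc_SystemCommit plausibly wraps VirtualAlloc with MEM_COMMIT over an already-reserved region; the sketch below assumes that mapping (on madvise-based POSIX ports, commit is typically a no-op):

    #include <windows.h>

    // Assumed shape of the system-commit hook: re-commit pages previously
    // decommitted with VirtualFree(..., MEM_DECOMMIT). The reservation was
    // never released, so the range is still owned by this process.
    void TCMalloc_SystemCommit(void* start, size_t length) {
      // PAGE_READWRITE matches what the heap needs; failure here would be
      // fatal in practice, so a real port would abort rather than return.
      VirtualAlloc(start, length, MEM_COMMIT, PAGE_READWRITE);
    }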
| 167 | 159 |
| 168 Span* PageHeap::Carve(Span* span, Length n) { | 160 Span* PageHeap::Carve(Span* span, Length n) { |
| 169 ASSERT(n > 0); | 161 ASSERT(n > 0); |
| 170 ASSERT(span->location != Span::IN_USE); | 162 ASSERT(span->location != Span::IN_USE); |
| 171 const int old_location = span->location; | 163 const int old_location = span->location; |
| 172 DLL_Remove(span); | 164 DLL_Remove(span); |
| 173 span->location = Span::IN_USE; | 165 span->location = Span::IN_USE; |
| 174 Event(span, 'A', n); | 166 Event(span, 'A', n); |
| 175 | 167 |
| (...skipping 10 matching lines...) |
| 186 Span* dst = (leftover->location == Span::ON_RETURNED_FREELIST | 178 Span* dst = (leftover->location == Span::ON_RETURNED_FREELIST |
| 187 ? &listpair->returned : &listpair->normal); | 179 ? &listpair->returned : &listpair->normal); |
| 188 DLL_Prepend(dst, leftover); | 180 DLL_Prepend(dst, leftover); |
| 189 | 181 |
| 190 span->length = n; | 182 span->length = n; |
| 191 pagemap_.set(span->start + n - 1, span); | 183 pagemap_.set(span->start + n - 1, span); |
| 192 } | 184 } |
| 193 if (old_location == Span::ON_RETURNED_FREELIST) { | 185 if (old_location == Span::ON_RETURNED_FREELIST) { |
| 194 // We need to recommit this address space. | 186 // We need to recommit this address space. |
| 195 CommitSpan(span); | 187 CommitSpan(span); |
| 196 } else { | |
| 197 #if DEFER_DECOMMIT | |
| 198 // The newly allocated memory is from a span that's already committed. | |
| 199 // Update the free_committed_pages_ count. | |
| 200 ASSERT(free_committed_pages_ >= n); | |
| 201 free_committed_pages_ -= n; | |
| 202 #endif | |
| 203 } | 188 } |
| 204 ASSERT(span->location == Span::IN_USE); | 189 ASSERT(span->location == Span::IN_USE); |
| 205 ASSERT(span->length == n); | 190 ASSERT(span->length == n); |
| 206 ASSERT(Check()); | 191 ASSERT(Check()); |
| 207 free_pages_ -= n; | 192 free_pages_ -= n; |
| 208 return span; | 193 return span; |
| 209 } | 194 } |
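Carve is the allocation-side counterpart of Delete: it unlinks a free span, splits off any tail beyond n pages as a leftover span that goes back on the matching free list, and recommits the carved pages only when they came off the returned (decommitted) list. An illustrative caller-side loop, modeled on the search in PageHeap::New and not quoted from this patch:

    // Satisfy an n-page request from the first free list that is large
    // enough, preferring committed (normal) spans over returned ones.
    Span* PageHeap::AllocLargeSketch(Length n) {  // hypothetical helper
      for (Length i = n; i < kMaxPages; i++) {
        if (!DLL_IsEmpty(&free_[i].normal)) {
          return Carve(free_[i].normal.next, n);   // leftover of i - n pages
        }
        if (!DLL_IsEmpty(&free_[i].returned)) {
          return Carve(free_[i].returned.next, n); // Carve recommits this one
        }
      }
      return NULL;  // the real code falls back to large_ and then GrowHeap
    }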
| 210 | 195 |
| 211 void PageHeap::Delete(Span* span) { | 196 void PageHeap::Delete(Span* span) { |
| 212 ASSERT(Check()); | 197 ASSERT(Check()); |
| (...skipping 15 matching lines...) |
| 228 // decommitting (decommitting is performed offline). | 213 // decommitting (decommitting is performed offline). |
| 229 const PageID p = span->start; | 214 const PageID p = span->start; |
| 230 const Length n = span->length; | 215 const Length n = span->length; |
| 231 Span* prev = GetDescriptor(p-1); | 216 Span* prev = GetDescriptor(p-1); |
| 232 if (prev != NULL && prev->location != Span::IN_USE) { | 217 if (prev != NULL && prev->location != Span::IN_USE) { |
| 233 // Merge preceding span into this span | 218 // Merge preceding span into this span |
| 234 ASSERT(prev->start + prev->length == p); | 219 ASSERT(prev->start + prev->length == p); |
| 235 const Length len = prev->length; | 220 const Length len = prev->length; |
| 236 if (prev->location == Span::ON_RETURNED_FREELIST) { | 221 if (prev->location == Span::ON_RETURNED_FREELIST) { |
| 237 CommitSpan(prev); | 222 CommitSpan(prev); |
| 238 #if DEFER_DECOMMIT | |
| 239 free_committed_pages_ += len; | |
| 240 #endif | |
| 241 } | 223 } |
| 242 DLL_Remove(prev); | 224 DLL_Remove(prev); |
| 243 DeleteSpan(prev); | 225 DeleteSpan(prev); |
| 244 span->start -= len; | 226 span->start -= len; |
| 245 span->length += len; | 227 span->length += len; |
| 246 pagemap_.set(span->start, span); | 228 pagemap_.set(span->start, span); |
| 247 Event(span, 'L', len); | 229 Event(span, 'L', len); |
| 248 } | 230 } |
| 249 Span* next = GetDescriptor(p+n); | 231 Span* next = GetDescriptor(p+n); |
| 250 if (next != NULL && next->location != Span::IN_USE) { | 232 if (next != NULL && next->location != Span::IN_USE) { |
| 251 // Merge next span into this span | 233 // Merge next span into this span |
| 252 ASSERT(next->start == p+n); | 234 ASSERT(next->start == p+n); |
| 253 const Length len = next->length; | 235 const Length len = next->length; |
| 254 if (next->location == Span::ON_RETURNED_FREELIST) { | 236 if (next->location == Span::ON_RETURNED_FREELIST) { |
| 255 CommitSpan(next); | 237 CommitSpan(next); |
| 256 #if DEFER_DECOMMIT | |
| 257 free_committed_pages_ += len; | |
| 258 #endif | |
| 259 } | 238 } |
| 260 DLL_Remove(next); | 239 DLL_Remove(next); |
| 261 DeleteSpan(next); | 240 DeleteSpan(next); |
| 262 span->length += len; | 241 span->length += len; |
| 263 pagemap_.set(span->start + span->length - 1, span); | 242 pagemap_.set(span->start + span->length - 1, span); |
| 264 Event(span, 'R', len); | 243 Event(span, 'R', len); |
| 265 } | 244 } |
| 266 | 245 |
| 267 Event(span, 'D', span->length); | 246 Event(span, 'D', span->length); |
| 268 span->location = Span::ON_NORMAL_FREELIST; | 247 span->location = Span::ON_NORMAL_FREELIST; |
| 269 if (span->length < kMaxPages) | 248 if (span->length < kMaxPages) { |
| 270 DLL_Prepend(&free_[span->length].normal, span); | 249 DLL_Prepend(&free_[span->length].normal, span); |
| 271 else | 250 } else { |
| 272 DLL_Prepend(&large_.normal, span); | 251 DLL_Prepend(&large_.normal, span); |
| 252 } |
| 273 free_pages_ += n; | 253 free_pages_ += n; |
| 274 #if DEFER_DECOMMIT | |
| 275 free_committed_pages_ += n; | |
| 276 #endif | |
| 277 | 254 |
| 278 #if DEFER_DECOMMIT | |
| 279 // TODO(antonm): notify that could start scavenging | |
| 280 #else | |
| 281 IncrementalScavenge(n); | 255 IncrementalScavenge(n); |
| 282 #endif | |
| 283 ASSERT(Check()); | 256 ASSERT(Check()); |
| 284 } | 257 } |
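Because only boundary pagemap entries are rewritten, coalescing stays O(1) no matter how large the merged spans get. A worked example of the merge above, with page numbers chosen for illustration:

    // Freeing span [10..11] (2 pages) when both neighbors are free:
    //   prev = GetDescriptor(9)  -> span [7..9],   3 pages
    //   next = GetDescriptor(12) -> span [12..12], 1 page
    // After the prev merge: span covers [7..11], pagemap_.set(7, span)
    // After the next merge: span covers [7..12], pagemap_.set(12, span)
    // Result: one 6-page span prepended to free_[6].normal. Interior
    // entries (8..11) go stale, which is fine: free spans are only ever
    // looked up through their first or last page.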
| 285 | 258 |
| 286 | |
| 287 void PageHeap::Scavenge() { | |
| 288 #if DEFER_DECOMMIT | |
| 289 // If we have to commit memory since the last scavenge, it means we don't | |
| 290 // have enough free committed pages of necessary size for the amount of | |
| 291 // allocations that we do. So hold off on releasing memory back to the system. | |
| 292 if (pages_committed_since_last_scavenge_ > 0) { | |
| 293 pages_committed_since_last_scavenge_ = 0; | |
| 294 return; | |
| 295 } | |
| 296 | |
| 297 if (free_committed_pages_ <= kMinimumFreeCommittedPageCount) { | |
| 298 return; | |
| 299 } | |
| 300 | |
| 301 uint64_t to_decommit = std::min( | |
| 302 free_committed_pages_ - kMinimumFreeCommittedPageCount, | |
| 303 free_committed_pages_ / kMaxScavengeAmountFactor); | |
| 304 to_decommit = DecommitFromSpanList(&large_, to_decommit); | |
| 305 for (int i = kMaxPages - 1; i >= 0; i--) { | |
| 306 to_decommit = DecommitFromSpanList(&free_[i], to_decommit); | |
| 307 } | |
| 308 | |
| 309 // Force at least one decommit from large list, otherwise big sized blocks | |
| 310 // sitting might block us from releasing smaller blocks behind. | |
| 311 if (to_decommit > 0) { | |
| 312 if (!DLL_IsEmpty(&large_.normal)) { | |
| 313 DecommitLastSpan(&large_, large_.normal.prev); | |
| 314 } | |
| 315 } | |
| 316 #endif | |
| 317 } | |
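Before its removal here, the deferred path budgeted each pass as min(free_committed_pages_ - kMinimumFreeCommittedPageCount, free_committed_pages_ / kMaxScavengeAmountFactor), then walked the lists largest-first. The two constants are defined outside this hunk; plugging in assumed values of 512 and 2 just to show the shape of the clamp:

    // free_committed_pages_ = 10000:
    //   to_decommit = min(10000 - 512, 10000 / 2) = min(9488, 5000) = 5000
    // free_committed_pages_ = 600:
    //   to_decommit = min(600 - 512, 600 / 2) = min(88, 300) = 88
    // A pass never eats into the minimum reserve and never returns more
    // than 1/kMaxScavengeAmountFactor of the committed free pages at once.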
| 318 | |
| 319 #if DEFER_DECOMMIT | |
| 320 Length PageHeap::DecommitLastSpan(SpanList* span_list, Span* span) { | |
| 321 ASSERT(!DLL_IsEmpty(&span_list->normal)); | |
| 322 ASSERT(span_list->normal.prev == span); | |
| 323 | |
| 324 Length length = span->length; | |
| 325 | |
| 326 DLL_Remove(span); | |
| 327 | |
| 328 TCMalloc_SystemRelease(reinterpret_cast<void*>(span->start << kPageShift), span->length << kPageShift); | |
| 329 span->location = Span::ON_RETURNED_FREELIST; | |
| 330 ASSERT(free_committed_pages_ >= length); | |
| 331 free_committed_pages_ -= length; | |
| 332 | |
| 333 DLL_Prepend(&span_list->returned, span); | |
| 334 | |
| 335 return length; | |
| 336 } | |
| 337 | |
| 338 uint64_t PageHeap::DecommitFromSpanList(SpanList* span_list, uint64_t to_decommit) { | |
| 339 while (!DLL_IsEmpty(&span_list->normal)) { | |
| 340 // Release the last span on the normal portion of this list. | |
| 341 Span* span = span_list->normal.prev; | |
| 342 | |
| 343 if (span->length > to_decommit) { | |
| 344 return to_decommit; | |
| 345 } | |
| 346 | |
| 347 to_decommit -= DecommitLastSpan(span_list, span); | |
| 348 } | |
| 349 | |
| 350 return to_decommit; | |
| 351 } | |
| 352 | |
| 353 #else | |
| 354 | |
| 355 void PageHeap::IncrementalScavenge(Length n) { | 259 void PageHeap::IncrementalScavenge(Length n) { |
| 356 // Fast path; not yet time to release memory | 260 // Fast path; not yet time to release memory |
| 357 scavenge_counter_ -= n; | 261 scavenge_counter_ -= n; |
| 358 if (scavenge_counter_ >= 0) return; // Not yet time to scavenge | 262 if (scavenge_counter_ >= 0) return; // Not yet time to scavenge |
| 359 | 263 |
| 360 // Never delay scavenging for more than the following number of | 264 // Never delay scavenging for more than the following number of |
| 361 // deallocated pages. With 4K pages, this comes to 4GB of | 265 // deallocated pages. With 4K pages, this comes to 4GB of |
| 362 // deallocation. | 266 // deallocation. |
| 363 // Chrome: Changed to 64MB | 267 // Chrome: Changed to 64MB |
| 364 static const int kMaxReleaseDelay = 1 << 14; | 268 static const int kMaxReleaseDelay = 1 << 14; |
| (...skipping 39 matching lines...) |
| 404 scavenge_index_ = index; // Scavenge at index+1 next time | 308 scavenge_index_ = index; // Scavenge at index+1 next time |
| 405 // Note: we stop scavenging after finding one. | 309 // Note: we stop scavenging after finding one. |
| 406 return; | 310 return; |
| 407 } | 311 } |
| 408 index++; | 312 index++; |
| 409 } | 313 } |
| 410 | 314 |
| 411 // Nothing to scavenge, delay for a while | 315 // Nothing to scavenge, delay for a while |
| 412 scavenge_counter_ = kDefaultReleaseDelay; | 316 scavenge_counter_ = kDefaultReleaseDelay; |
| 413 } | 317 } |
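The counter-based path kept by this patch spreads releases across deallocations: Delete feeds each freed span's page count into scavenge_counter_, and one span is returned to the system whenever the counter underflows. The clamp in the Chrome comment checks out, assuming the usual kPageShift = 12 (4 KB pages):

    // kMaxReleaseDelay = 1 << 14 pages = 16384 pages
    // 16384 pages * 4096 bytes/page = 67108864 bytes = 64 MB
    // (The upstream "4GB" figure would correspond to 1 << 20 pages.)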
| 414 #endif | |
| 415 | 318 |
| 416 void PageHeap::RegisterSizeClass(Span* span, size_t sc) { | 319 void PageHeap::RegisterSizeClass(Span* span, size_t sc) { |
| 417 // Associate span object with all interior pages as well | 320 // Associate span object with all interior pages as well |
| 418 ASSERT(span->location == Span::IN_USE); | 321 ASSERT(span->location == Span::IN_USE); |
| 419 ASSERT(GetDescriptor(span->start) == span); | 322 ASSERT(GetDescriptor(span->start) == span); |
| 420 ASSERT(GetDescriptor(span->start+span->length-1) == span); | 323 ASSERT(GetDescriptor(span->start+span->length-1) == span); |
| 421 Event(span, 'C', sc); | 324 Event(span, 'C', sc); |
| 422 span->sizeclass = sc; | 325 span->sizeclass = sc; |
| 423 for (Length i = 1; i < span->length-1; i++) { | 326 for (Length i = 1; i < span->length-1; i++) { |
| 424 pagemap_.set(span->start+i, span); | 327 pagemap_.set(span->start+i, span); |
| (...skipping 81 matching lines...) |
| 506 void* ptr = TCMalloc_SystemAlloc(ask << kPageShift, &actual_size, kPageSize); | 409 void* ptr = TCMalloc_SystemAlloc(ask << kPageShift, &actual_size, kPageSize); |
| 507 if (ptr == NULL) { | 410 if (ptr == NULL) { |
| 508 if (n < ask) { | 411 if (n < ask) { |
| 509 // Try growing just "n" pages | 412 // Try growing just "n" pages |
| 510 ask = n; | 413 ask = n; |
| 511 ptr = TCMalloc_SystemAlloc(ask << kPageShift, &actual_size, kPageSize); | 414 ptr = TCMalloc_SystemAlloc(ask << kPageShift, &actual_size, kPageSize); |
| 512 } | 415 } |
| 513 if (ptr == NULL) return false; | 416 if (ptr == NULL) return false; |
| 514 } | 417 } |
| 515 ask = actual_size >> kPageShift; | 418 ask = actual_size >> kPageShift; |
| 516 #if DEFER_DECOMMIT | |
| 517 pages_committed_since_last_scavenge_ += ask; | |
| 518 #endif | |
| 519 RecordGrowth(ask << kPageShift); | 419 RecordGrowth(ask << kPageShift); |
| 520 | 420 |
| 521 uint64_t old_system_bytes = system_bytes_; | 421 uint64_t old_system_bytes = system_bytes_; |
| 522 system_bytes_ += (ask << kPageShift); | 422 system_bytes_ += (ask << kPageShift); |
| 523 const PageID p = reinterpret_cast<uintptr_t>(ptr) >> kPageShift; | 423 const PageID p = reinterpret_cast<uintptr_t>(ptr) >> kPageShift; |
| 524 ASSERT(p > 0); | 424 ASSERT(p > 0); |
| 525 | 425 |
| 526 // If we have already a lot of pages allocated, just pre allocate a bunch of | 426 // If we have already a lot of pages allocated, just pre allocate a bunch of |
| 527 // memory for the page map. This prevents fragmentation by pagemap metadata | 427 // memory for the page map. This prevents fragmentation by pagemap metadata |
| 528 // when a program keeps allocating and freeing large blocks. | 428 // when a program keeps allocating and freeing large blocks. |
| (...skipping 65 matching lines...) |
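The visible half of GrowHeap shows its retry shape: ask the system for a padded request of ask pages, fall back to exactly n on failure, then recompute ask from actual_size since the system allocator may round the request up. The page/byte conversions, again assuming kPageShift = 12:

    // bytes requested:  ask << kPageShift          16 pages    -> 65536 bytes
    // pages obtained:   actual_size >> kPageShift  81920 bytes -> 20 pages
    // page id of ptr:   reinterpret_cast<uintptr_t>(ptr) >> kPageShift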
| 594 static_cast<size_t>(s->length << kPageShift)); | 494 static_cast<size_t>(s->length << kPageShift)); |
| 595 } | 495 } |
| 596 } | 496 } |
| 597 | 497 |
| 598 void PageHeap::ReleaseFreePages() { | 498 void PageHeap::ReleaseFreePages() { |
| 599 for (Length s = 0; s < kMaxPages; s++) { | 499 for (Length s = 0; s < kMaxPages; s++) { |
| 600 ReleaseFreeList(&free_[s].normal, &free_[s].returned); | 500 ReleaseFreeList(&free_[s].normal, &free_[s].returned); |
| 601 } | 501 } |
| 602 ReleaseFreeList(&large_.normal, &large_.returned); | 502 ReleaseFreeList(&large_.normal, &large_.returned); |
| 603 ASSERT(Check()); | 503 ASSERT(Check()); |
| 604 #if DEFER_DECOMMIT | |
| 605 free_committed_pages_ = 0; | |
| 606 #endif | |
| 607 } | 504 } |
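ReleaseFreePages is the eager path: it drains every normal list, including large_, in one sweep, where IncrementalScavenge releases at most one span per trigger. Judging from the ReleaseFreeList tail visible above, each span is released to the system and moved to the adjacent returned list; a sketch under that assumption:

    void PageHeap::ReleaseFreeList(Span* list, Span* returned) {
      // Walk from the tail so that each DLL_Prepend onto "returned"
      // preserves the spans' relative order.
      while (!DLL_IsEmpty(list)) {
        Span* s = list->prev;
        DLL_Remove(s);
        TCMalloc_SystemRelease(reinterpret_cast<void*>(s->start << kPageShift),
                               static_cast<size_t>(s->length << kPageShift));
        s->location = Span::ON_RETURNED_FREELIST;
        DLL_Prepend(returned, s);
      }
    }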
| 608 | 505 |
| 609 } // namespace tcmalloc | 506 } // namespace tcmalloc |