| OLD | NEW |
| 1 // Copyright (c) 2008, Google Inc. | 1 // Copyright (c) 2008, Google Inc. |
| 2 // All rights reserved. | 2 // All rights reserved. |
| 3 // | 3 // |
| 4 // Redistribution and use in source and binary forms, with or without | 4 // Redistribution and use in source and binary forms, with or without |
| 5 // modification, are permitted provided that the following conditions are | 5 // modification, are permitted provided that the following conditions are |
| 6 // met: | 6 // met: |
| 7 // | 7 // |
| 8 // * Redistributions of source code must retain the above copyright | 8 // * Redistributions of source code must retain the above copyright |
| 9 // notice, this list of conditions and the following disclaimer. | 9 // notice, this list of conditions and the following disclaimer. |
| 10 // * Redistributions in binary form must reproduce the above | 10 // * Redistributions in binary form must reproduce the above |
| (...skipping 31 matching lines...) |
| 42 "Zero means we never release memory back to the system. " | 42 "Zero means we never release memory back to the system. " |
| 43 "Increase this flag to return memory faster; decrease it " | 43 "Increase this flag to return memory faster; decrease it " |
| 44 "to return memory slower. Reasonable rates are in the " | 44 "to return memory slower. Reasonable rates are in the " |
| 45 "range [0,10]"); | 45 "range [0,10]"); |
| 46 | 46 |
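
For context on the flag above: further down, in IncrementalScavenge(), the rate is converted into the number of deallocated pages to wait before the next release. A minimal standalone sketch of that arithmetic (not part of the diff; the cap constant matches the Chrome-modified value shown later in this file, and 4K pages are assumed):

```cpp
#include <cstdint>
#include <cstdio>

// Cap on the scavenge delay, as in the Chrome-modified constant below
// (1 << 14 pages, i.e. 64MB with 4K pages).
static const int kMaxReleaseDelay = 1 << 14;

// Mirrors the delay computation in IncrementalScavenge(): rate == 1 means
// "wait for 1000 deallocated pages after releasing one page".
int64_t ScavengeDelayPages(double release_rate, int64_t released_pages) {
  const double mult = 1000.0 / release_rate;
  double wait = mult * static_cast<double>(released_pages);
  if (wait > kMaxReleaseDelay) wait = kMaxReleaseDelay;  // avoid overflow
  return static_cast<int64_t>(wait);
}

int main() {
  // Default rate 1, one page released -> wait 1000 pages (~4MB of frees).
  printf("%lld\n", (long long)ScavengeDelayPages(1.0, 1));
  // Rate 10 (aggressive), a 4-page span released -> wait 400 pages.
  printf("%lld\n", (long long)ScavengeDelayPages(10.0, 4));
  return 0;
}
```

So at the default rate of 1, releasing a single page defers the next scavenge by roughly 1000 freed pages; raising the rate shrinks that delay proportionally.
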
| 47 namespace tcmalloc { | 47 namespace tcmalloc { |
| 48 | 48 |
| 49 PageHeap::PageHeap() | 49 PageHeap::PageHeap() |
| 50 : pagemap_(MetaDataAlloc), | 50 : pagemap_(MetaDataAlloc), |
| 51 pagemap_cache_(0), | 51 pagemap_cache_(0), |
| 52 free_pages_(0), |
| 53 system_bytes_(0), |
| 54 committed_bytes_(0), |
| 52 scavenge_counter_(0), | 55 scavenge_counter_(0), |
| 53 // Start scavenging at kMaxPages list | 56 // Start scavenging at kMaxPages list |
| 54 release_index_(kMaxPages) { | 57 scavenge_index_(kMaxPages-1) { |
| 55 COMPILE_ASSERT(kNumClasses <= (1 << PageMapCache::kValuebits), valuebits); | 58 COMPILE_ASSERT(kNumClasses <= (1 << PageMapCache::kValuebits), valuebits); |
| 56 DLL_Init(&large_.normal); | 59 DLL_Init(&large_.normal); |
| 57 DLL_Init(&large_.returned); | 60 DLL_Init(&large_.returned); |
| 58 for (int i = 0; i < kMaxPages; i++) { | 61 for (int i = 0; i < kMaxPages; i++) { |
| 59 DLL_Init(&free_[i].normal); | 62 DLL_Init(&free_[i].normal); |
| 60 DLL_Init(&free_[i].returned); | 63 DLL_Init(&free_[i].returned); |
| 61 } | 64 } |
| 62 } | 65 } |
| 63 | 66 |
| 64 Span* PageHeap::New(Length n) { | 67 Span* PageHeap::New(Length n) { |
| (...skipping 15 matching lines...) |
| 80 return Carve(ll->next, n); | 83 return Carve(ll->next, n); |
| 81 } | 84 } |
| 82 // Still no luck, so keep looking in larger classes. | 85 // Still no luck, so keep looking in larger classes. |
| 83 } | 86 } |
| 84 | 87 |
| 85 Span* result = AllocLarge(n); | 88 Span* result = AllocLarge(n); |
| 86 if (result != NULL) return result; | 89 if (result != NULL) return result; |
| 87 | 90 |
| 88 // Grow the heap and try again | 91 // Grow the heap and try again |
| 89 if (!GrowHeap(n)) { | 92 if (!GrowHeap(n)) { |
| 90 ASSERT(stats_.unmapped_bytes+ stats_.committed_bytes==stats_.system_bytes); | |
| 91 ASSERT(Check()); | 93 ASSERT(Check()); |
| 92 return NULL; | 94 return NULL; |
| 93 } | 95 } |
| 94 | 96 |
| 95 return AllocLarge(n); | 97 return AllocLarge(n); |
| 96 } | 98 } |
| 97 | 99 |
| 98 Span* PageHeap::AllocLarge(Length n) { | 100 Span* PageHeap::AllocLarge(Length n) { |
| 99 // find the best span (closest to n in size). | 101 // find the best span (closest to n in size). |
| 100 // The following loop implements address-ordered best-fit. | 102 // The following loop implements address-ordered best-fit. |
| (...skipping 44 matching lines...) |
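
The AllocLarge() body referenced by the comment just above is elided in this view. The selection rule it names, address-ordered best-fit, can be illustrated with a self-contained sketch; FakeSpan and BestFit below are illustration-only stand-ins, not the elided code:

```cpp
#include <cstdint>
#include <cstdio>
#include <vector>

// Simplified stand-in for tcmalloc's Span: a run of pages starting at
// page `start`, `length` pages long.
struct FakeSpan { uint64_t start; uint64_t length; };

// Address-ordered best-fit: among all spans large enough for n pages, pick
// the smallest; among equally small candidates, pick the lowest address.
const FakeSpan* BestFit(const std::vector<FakeSpan>& spans, uint64_t n) {
  const FakeSpan* best = nullptr;
  for (const FakeSpan& s : spans) {
    if (s.length < n) continue;  // too small to satisfy the request
    if (best == nullptr ||
        s.length < best->length ||  // tighter fit wins
        (s.length == best->length && s.start < best->start)) {  // lower address wins
      best = &s;
    }
  }
  return best;
}

int main() {
  std::vector<FakeSpan> spans = {{100, 8}, {40, 6}, {900, 6}, {10, 3}};
  const FakeSpan* s = BestFit(spans, 5);
  // Picks {40, 6}: length 6 is the tightest fit that holds 5 pages, and
  // start 40 beats the equally sized span at 900.
  printf("start=%llu length=%llu\n",
         (unsigned long long)s->start, (unsigned long long)s->length);
  return 0;
}
```
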
| 145 RecordSpan(leftover); | 147 RecordSpan(leftover); |
| 146 pagemap_.set(span->start + n - 1, span); // Update map from pageid to span | 148 pagemap_.set(span->start + n - 1, span); // Update map from pageid to span |
| 147 span->length = n; | 149 span->length = n; |
| 148 | 150 |
| 149 return leftover; | 151 return leftover; |
| 150 } | 152 } |
| 151 | 153 |
| 152 void PageHeap::CommitSpan(Span* span) { | 154 void PageHeap::CommitSpan(Span* span) { |
| 153 TCMalloc_SystemCommit(reinterpret_cast<void*>(span->start << kPageShift), | 155 TCMalloc_SystemCommit(reinterpret_cast<void*>(span->start << kPageShift), |
| 154 static_cast<size_t>(span->length << kPageShift)); | 156 static_cast<size_t>(span->length << kPageShift)); |
| 155 stats_.committed_bytes += span->length << kPageShift; | 157 committed_bytes_ += span->length << kPageShift; |
| 156 } | 158 } |
| 157 | 159 |
| 158 void PageHeap::DecommitSpan(Span* span) { | 160 void PageHeap::DecommitSpan(Span* span) { |
| 159 TCMalloc_SystemRelease(reinterpret_cast<void*>(span->start << kPageShift), | 161 TCMalloc_SystemRelease(reinterpret_cast<void*>(span->start << kPageShift), |
| 160 static_cast<size_t>(span->length << kPageShift)); | 162 static_cast<size_t>(span->length << kPageShift)); |
| 161 stats_.committed_bytes -= span->length << kPageShift; | 163 committed_bytes_ -= span->length << kPageShift; |
| 162 } | 164 } |
| 163 | 165 |
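
Both CommitSpan() and DecommitSpan() above translate the span's page-granular fields into a byte address and size by shifting with kPageShift. A tiny standalone example of that arithmetic (kPageShift = 12, i.e. 4K pages, is an assumption for illustration; the real value comes from tcmalloc's configuration):

```cpp
#include <cstdint>
#include <cstdio>

static const int kPageShift = 12;  // assumed 4K pages for this example

int main() {
  uint64_t start_page = 0x12345;  // plays the role of span->start (a PageID)
  uint64_t length_pages = 3;      // plays the role of span->length

  // Page ID -> byte address, page count -> byte count, exactly as in
  // TCMalloc_SystemCommit/Release calls above.
  uintptr_t addr = static_cast<uintptr_t>(start_page) << kPageShift;
  size_t bytes = static_cast<size_t>(length_pages) << kPageShift;

  // 0x12345 << 12 == 0x12345000; 3 << 12 == 12288 bytes (three 4K pages).
  printf("addr=%#llx bytes=%zu\n", (unsigned long long)addr, bytes);
  return 0;
}
```
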
| 164 Span* PageHeap::Carve(Span* span, Length n) { | 166 Span* PageHeap::Carve(Span* span, Length n) { |
| 165 ASSERT(n > 0); | 167 ASSERT(n > 0); |
| 166 ASSERT(span->location != Span::IN_USE); | 168 ASSERT(span->location != Span::IN_USE); |
| 167 const int old_location = span->location; | 169 const int old_location = span->location; |
| 168 RemoveFromFreeList(span); | 170 DLL_Remove(span); |
| 169 span->location = Span::IN_USE; | 171 span->location = Span::IN_USE; |
| 170 Event(span, 'A', n); | 172 Event(span, 'A', n); |
| 171 | 173 |
| 172 const int extra = span->length - n; | 174 const int extra = span->length - n; |
| 173 ASSERT(extra >= 0); | 175 ASSERT(extra >= 0); |
| 174 if (extra > 0) { | 176 if (extra > 0) { |
| 175 Span* leftover = NewSpan(span->start + n, extra); | 177 Span* leftover = NewSpan(span->start + n, extra); |
| 176 leftover->location = old_location; | 178 leftover->location = old_location; |
| 177 Event(leftover, 'S', extra); | 179 Event(leftover, 'S', extra); |
| 178 RecordSpan(leftover); | 180 RecordSpan(leftover); |
| 179 | 181 |
| 180 // The previous span of |leftover| was just split -- no need to | 182 // Place leftover span on appropriate free list |
| 181 // coalesce them. The next span of |leftover| was not previously coalesced | 183 SpanList* listpair = (extra < kMaxPages) ? &free_[extra] : &large_; |
| 182 // with |span|, i.e. is NULL or has a location other than |old_location|. | 184 Span* dst = (leftover->location == Span::ON_RETURNED_FREELIST |
| 183 const PageID p = leftover->start; | 185 ? &listpair->returned : &listpair->normal); |
| 184 const Length len = leftover->length; | 186 DLL_Prepend(dst, leftover); |
| 185 Span* next = GetDescriptor(p+len); | |
| 186 ASSERT (next == NULL || | |
| 187 next->location == Span::IN_USE || | |
| 188 next->location != leftover->location); | |
| 189 | 187 |
| 190 PrependToFreeList(leftover); // Skip coalescing - no candidates possible | |
| 191 span->length = n; | 188 span->length = n; |
| 192 pagemap_.set(span->start + n - 1, span); | 189 pagemap_.set(span->start + n - 1, span); |
| 193 } | 190 } |
| 194 ASSERT(Check()); | 191 ASSERT(Check()); |
| 192 free_pages_ -= n; |
| 195 if (old_location == Span::ON_RETURNED_FREELIST) { | 193 if (old_location == Span::ON_RETURNED_FREELIST) { |
| 196 // We need to recommit this address space. | 194 // We need to recommit this address space. |
| 197 CommitSpan(span); | 195 CommitSpan(span); |
| 198 } | 196 } |
| 199 ASSERT(span->location == Span::IN_USE); | 197 ASSERT(span->location == Span::IN_USE); |
| 200 ASSERT(span->length == n); | 198 ASSERT(span->length == n); |
| 201 ASSERT(stats_.unmapped_bytes+ stats_.committed_bytes==stats_.system_bytes); | |
| 202 return span; | 199 return span; |
| 203 } | 200 } |
| 204 | 201 |
| 205 void PageHeap::Delete(Span* span) { | 202 void PageHeap::Delete(Span* span) { |
| 206 ASSERT(Check()); | 203 ASSERT(Check()); |
| 207 ASSERT(span->location == Span::IN_USE); | 204 ASSERT(span->location == Span::IN_USE); |
| 208 ASSERT(span->length > 0); | 205 ASSERT(span->length > 0); |
| 209 ASSERT(GetDescriptor(span->start) == span); | 206 ASSERT(GetDescriptor(span->start) == span); |
| 210 ASSERT(GetDescriptor(span->start + span->length - 1) == span); | 207 ASSERT(GetDescriptor(span->start + span->length - 1) == span); |
| 211 const Length n = span->length; | |
| 212 span->sizeclass = 0; | 208 span->sizeclass = 0; |
| 213 span->sample = 0; | 209 span->sample = 0; |
| 214 span->location = Span::ON_NORMAL_FREELIST; | |
| 215 Event(span, 'D', span->length); | |
| 216 MergeIntoFreeList(span); // Coalesces if possible | |
| 217 IncrementalScavenge(n); | |
| 218 ASSERT(stats_.unmapped_bytes+ stats_.committed_bytes==stats_.system_bytes); | |
| 219 ASSERT(Check()); | |
| 220 } | |
| 221 | |
| 222 void PageHeap::MergeIntoFreeList(Span* span) { | |
| 223 ASSERT(span->location != Span::IN_USE); | |
| 224 | 210 |
| 225 // Coalesce -- we guarantee that "p" != 0, so no bounds checking | 211 // Coalesce -- we guarantee that "p" != 0, so no bounds checking |
| 226 // necessary. We do not bother resetting the stale pagemap | 212 // necessary. We do not bother resetting the stale pagemap |
| 227 // entries for the pieces we are merging together because we only | 213 // entries for the pieces we are merging together because we only |
| 228 // care about the pagemap entries for the boundaries. | 214 // care about the pagemap entries for the boundaries. |
| 229 // | 215 // |
| 230 // Note that the adjacent spans we merge into "span" may come out of a | 216 // Note that the adjacent spans we merge into "span" may come out of a |
| 231 // "normal" (committed) list, and cleanly merge with our IN_USE span, which | 217 // "normal" (committed) list, and cleanly merge with our IN_USE span, which |
| 232 // is implicitly committed. If the adjacent spans are on the "returned" | 218 // is implicitly committed. If the adjacent spans are on the "returned" |
| 233 // (decommitted) list, then we must get both spans into the same state before | 219 // (decommitted) list, then we must get both spans into the same state before |
| (...skipping 12 matching lines...) |
| 246 const PageID p = span->start; | 232 const PageID p = span->start; |
| 247 const Length n = span->length; | 233 const Length n = span->length; |
| 248 Span* prev = GetDescriptor(p-1); | 234 Span* prev = GetDescriptor(p-1); |
| 249 if (prev != NULL && prev->location != Span::IN_USE) { | 235 if (prev != NULL && prev->location != Span::IN_USE) { |
| 250 // Merge preceding span into this span | 236 // Merge preceding span into this span |
| 251 ASSERT(prev->start + prev->length == p); | 237 ASSERT(prev->start + prev->length == p); |
| 252 const Length len = prev->length; | 238 const Length len = prev->length; |
| 253 if (prev->location == Span::ON_RETURNED_FREELIST) { | 239 if (prev->location == Span::ON_RETURNED_FREELIST) { |
| 254 // We're about to put the merge span into the returned freelist and call | 240 // We're about to put the merge span into the returned freelist and call |
| 255 // DecommitSpan() on it, which will mark the entire span including this | 241 // DecommitSpan() on it, which will mark the entire span including this |
| 256 // one as released and decrease stats_.committed_bytes by the size of the | 242 // one as released and decrease committed_bytes_ by the size of the |
| 257 // merged span. To make the math work out we temporarily increase the | 243 // merged span. To make the math work out we temporarily increase the |
| 258 // stats_.committed_bytes amount. | 244 // committed_bytes_ amount. |
| 259 stats_.committed_bytes += prev->length << kPageShift; | 245 committed_bytes_ += prev->length << kPageShift; |
| 260 } | 246 } |
| 261 RemoveFromFreeList(prev); | 247 DLL_Remove(prev); |
| 262 DeleteSpan(prev); | 248 DeleteSpan(prev); |
| 263 span->start -= len; | 249 span->start -= len; |
| 264 span->length += len; | 250 span->length += len; |
| 265 pagemap_.set(span->start, span); | 251 pagemap_.set(span->start, span); |
| 266 Event(span, 'L', len); | 252 Event(span, 'L', len); |
| 267 } | 253 } |
| 268 Span* next = GetDescriptor(p+n); | 254 Span* next = GetDescriptor(p+n); |
| 269 if (next != NULL && next->location != Span::IN_USE) { | 255 if (next != NULL && next->location != Span::IN_USE) { |
| 270 // Merge next span into this span | 256 // Merge next span into this span |
| 271 ASSERT(next->start == p+n); | 257 ASSERT(next->start == p+n); |
| 272 const Length len = next->length; | 258 const Length len = next->length; |
| 273 if (next->location == Span::ON_RETURNED_FREELIST) { | 259 if (next->location == Span::ON_RETURNED_FREELIST) { |
| 274 // See the comment below 'if (prev->location ...' for explanation. | 260 // See the comment below 'if (prev->location ...' for explanation. |
| 275 stats_.committed_bytes += next->length << kPageShift; | 261 committed_bytes_ += next->length << kPageShift; |
| 276 } | 262 } |
| 277 RemoveFromFreeList(next); | 263 DLL_Remove(next); |
| 278 DeleteSpan(next); | 264 DeleteSpan(next); |
| 279 span->length += len; | 265 span->length += len; |
| 280 pagemap_.set(span->start + span->length - 1, span); | 266 pagemap_.set(span->start + span->length - 1, span); |
| 281 Event(span, 'R', len); | 267 Event(span, 'R', len); |
| 282 } | 268 } |
| 283 | 269 |
| 284 Event(span, 'D', span->length); | 270 Event(span, 'D', span->length); |
| 285 span->location = Span::ON_RETURNED_FREELIST; | 271 span->location = Span::ON_RETURNED_FREELIST; |
| 286 DecommitSpan(span); | 272 DecommitSpan(span); |
| 287 PrependToFreeList(span); | 273 if (span->length < kMaxPages) { |
| 288 } | 274 DLL_Prepend(&free_[span->length].returned, span); |
| 275 } else { |
| 276 DLL_Prepend(&large_.returned, span); |
| 277 } |
| 278 free_pages_ += n; |
| 289 | 279 |
| 290 void PageHeap::PrependToFreeList(Span* span) { | 280 IncrementalScavenge(n); |
| 291 ASSERT(span->location != Span::IN_USE); | 281 ASSERT(Check()); |
| 292 SpanList* list = (span->length < kMaxPages) ? &free_[span->length] : &large_; | |
| 293 if (span->location == Span::ON_NORMAL_FREELIST) { | |
| 294 stats_.free_bytes += (span->length << kPageShift); | |
| 295 DLL_Prepend(&list->normal, span); | |
| 296 } else { | |
| 297 stats_.unmapped_bytes += (span->length << kPageShift); | |
| 298 DLL_Prepend(&list->returned, span); | |
| 299 } | |
| 300 } | |
| 301 | |
| 302 void PageHeap::RemoveFromFreeList(Span* span) { | |
| 303 ASSERT(span->location != Span::IN_USE); | |
| 304 if (span->location == Span::ON_NORMAL_FREELIST) { | |
| 305 stats_.free_bytes -= (span->length << kPageShift); | |
| 306 } else { | |
| 307 stats_.unmapped_bytes -= (span->length << kPageShift); | |
| 308 } | |
| 309 DLL_Remove(span); | |
| 310 } | 282 } |
| 311 | 283 |
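
The coalescing comment above notes that stale pagemap entries for merged pieces are never reset, because only the entries at span boundaries matter: merging looks up the page just before the span and the page just after it. A toy model of that lookup pattern (std::map and ToySpan are illustration-only stand-ins for the radix-tree pagemap and Span):

```cpp
#include <cstdint>
#include <cstdio>
#include <map>

struct ToySpan { uint64_t start, length; };

// Stand-in for the pagemap: only boundary pages are kept up to date.
static std::map<uint64_t, ToySpan*> pagemap;

void RecordBoundaries(ToySpan* s) {
  pagemap[s->start] = s;                      // first page of the span
  pagemap[s->start + s->length - 1] = s;      // last page of the span
}

ToySpan* Descriptor(uint64_t page) {
  auto it = pagemap.find(page);
  return it == pagemap.end() ? nullptr : it->second;
}

int main() {
  ToySpan a = {100, 4};   // pages [100, 104)
  ToySpan b = {104, 2};   // pages [104, 106), immediately after a
  RecordBoundaries(&a);
  RecordBoundaries(&b);

  // Freeing a span covering [100, 104): look one page past the end, as
  // Delete() does with GetDescriptor(p+n); b is found via its start entry.
  ToySpan* next = Descriptor(100 + 4);
  printf("neighbor after: start=%llu len=%llu\n",
         (unsigned long long)next->start, (unsigned long long)next->length);
  // Interior pages (101..102) may hold stale entries; they are never read.
  return 0;
}
```
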
| 312 void PageHeap::IncrementalScavenge(Length n) { | 284 void PageHeap::IncrementalScavenge(Length n) { |
| 313 // Fast path; not yet time to release memory | 285 // Fast path; not yet time to release memory |
| 314 scavenge_counter_ -= n; | 286 scavenge_counter_ -= n; |
| 315 if (scavenge_counter_ >= 0) return; // Not yet time to scavenge | 287 if (scavenge_counter_ >= 0) return; // Not yet time to scavenge |
| 316 | 288 |
| 289 // Never delay scavenging for more than the following number of |
| 290 // deallocated pages. With 4K pages, this comes to 4GB of |
| 291 // deallocation. |
| 292 // Chrome: Changed to 64MB |
| 293 static const int kMaxReleaseDelay = 1 << 14; |
| 294 |
| 295 // If there is nothing to release, wait for so many pages before |
| 296 // scavenging again. With 4K pages, this comes to 1GB of memory. |
| 297 // Chrome: Changed to 16MB |
| 298 static const int kDefaultReleaseDelay = 1 << 12; |
| 299 |
| 317 const double rate = FLAGS_tcmalloc_release_rate; | 300 const double rate = FLAGS_tcmalloc_release_rate; |
| 318 if (rate <= 1e-6) { | 301 if (rate <= 1e-6) { |
| 319 // Tiny release rate means that releasing is disabled. | 302 // Tiny release rate means that releasing is disabled. |
| 320 scavenge_counter_ = kDefaultReleaseDelay; | 303 scavenge_counter_ = kDefaultReleaseDelay; |
| 321 return; | 304 return; |
| 322 } | 305 } |
| 323 | 306 |
| 324 Length released_pages = ReleaseAtLeastNPages(1); | 307 // Find index of free list to scavenge |
| 308 int index = scavenge_index_ + 1; |
| 309 for (int i = 0; i < kMaxPages+1; i++) { |
| 310 if (index > kMaxPages) index = 0; |
| 311 SpanList* slist = (index == kMaxPages) ? &large_ : &free_[index]; |
| 312 if (!DLL_IsEmpty(&slist->normal)) { |
| 313 // Release the last span on the normal portion of this list |
| 314 Span* s = slist->normal.prev; |
| 315 ASSERT(s->location == Span::ON_NORMAL_FREELIST); |
| 316 DLL_Remove(s); |
| 317 DecommitSpan(s); |
| 318 s->location = Span::ON_RETURNED_FREELIST; |
| 319 DLL_Prepend(&slist->returned, s); |
| 325 | 320 |
| 326 if (released_pages == 0) { | 321 // Compute how long to wait until we return memory. |
| 327 // Nothing to scavenge, delay for a while. | 322 // FLAGS_tcmalloc_release_rate==1 means wait for 1000 pages |
| 328 scavenge_counter_ = kDefaultReleaseDelay; | 323 // after releasing one page. |
| 329 } else { | 324 const double mult = 1000.0 / rate; |
| 330 printf("HERE!\n"); | 325 double wait = mult * static_cast<double>(s->length); |
| 331 ASSERT(0); | 326 if (wait > kMaxReleaseDelay) { |
| 332 // Compute how long to wait until we return memory. | 327 // Avoid overflow and bound to reasonable range |
| 333 // FLAGS_tcmalloc_release_rate==1 means wait for 1000 pages | 328 wait = kMaxReleaseDelay; |
| 334 // after releasing one page. | 329 } |
| 335 const double mult = 1000.0 / rate; | 330 scavenge_counter_ = static_cast<int64_t>(wait); |
| 336 double wait = mult * static_cast<double>(released_pages); | 331 |
| 337 if (wait > kMaxReleaseDelay) { | 332 scavenge_index_ = index; // Scavenge at index+1 next time |
| 338 // Avoid overflow and bound to reasonable range. | 333 // Note: we stop scavenging after finding one. |
| 339 wait = kMaxReleaseDelay; | 334 return; |
| 340 } | 335 } |
| 341 scavenge_counter_ = static_cast<int64_t>(wait); | 336 index++; |
| 342 } | 337 } |
| 343 } | |
| 344 | 338 |
| 345 Length PageHeap::ReleaseLastNormalSpan(SpanList* slist) { | 339 // Nothing to scavenge, delay for a while |
| 346 Span* s = slist->normal.prev; | 340 scavenge_counter_ = kDefaultReleaseDelay; |
| 347 ASSERT(s->location == Span::ON_NORMAL_FREELIST); | |
| 348 RemoveFromFreeList(s); | |
| 349 const Length n = s->length; | |
| 350 TCMalloc_SystemRelease(reinterpret_cast<void*>(s->start << kPageShift), | |
| 351 static_cast<size_t>(s->length << kPageShift)); | |
| 352 s->location = Span::ON_RETURNED_FREELIST; | |
| 353 MergeIntoFreeList(s); // Coalesces if possible. | |
| 354 return n; | |
| 355 } | |
| 356 | |
| 357 Length PageHeap::ReleaseAtLeastNPages(Length num_pages) { | |
| 358 Length released_pages = 0; | |
| 359 Length prev_released_pages = -1; | |
| 360 | |
| 361 // Round robin through the lists of free spans, releasing the last | |
| 362 // span in each list. Stop after releasing at least num_pages. | |
| 363 while (released_pages < num_pages) { | |
| 364 if (released_pages == prev_released_pages) { | |
| 365 // Last iteration of while loop made no progress. | |
| 366 break; | |
| 367 } | |
| 368 prev_released_pages = released_pages; | |
| 369 | |
| 370 for (int i = 0; i < kMaxPages+1 && released_pages < num_pages; | |
| 371 i++, release_index_++) { | |
| 372 if (release_index_ > kMaxPages) release_index_ = 0; | |
| 373 SpanList* slist = (release_index_ == kMaxPages) ? | |
| 374 &large_ : &free_[release_index_]; | |
| 375 if (!DLL_IsEmpty(&slist->normal)) { | |
| 376 Length released_len = ReleaseLastNormalSpan(slist); | |
| 377 released_pages += released_len; | |
| 378 } | |
| 379 } | |
| 380 } | |
| 381 return released_pages; | |
| 382 } | 341 } |
| 383 | 342 |
| 384 void PageHeap::RegisterSizeClass(Span* span, size_t sc) { | 343 void PageHeap::RegisterSizeClass(Span* span, size_t sc) { |
| 385 // Associate span object with all interior pages as well | 344 // Associate span object with all interior pages as well |
| 386 ASSERT(span->location == Span::IN_USE); | 345 ASSERT(span->location == Span::IN_USE); |
| 387 ASSERT(GetDescriptor(span->start) == span); | 346 ASSERT(GetDescriptor(span->start) == span); |
| 388 ASSERT(GetDescriptor(span->start+span->length-1) == span); | 347 ASSERT(GetDescriptor(span->start+span->length-1) == span); |
| 389 Event(span, 'C', sc); | 348 Event(span, 'C', sc); |
| 390 span->sizeclass = sc; | 349 span->sizeclass = sc; |
| 391 for (Length i = 1; i < span->length-1; i++) { | 350 for (Length i = 1; i < span->length-1; i++) { |
| 392 pagemap_.set(span->start+i, span); | 351 pagemap_.set(span->start+i, span); |
| 393 } | 352 } |
| 394 } | 353 } |
| 395 | 354 |
| 396 static double MB(uint64_t bytes) { | |
| 397 return bytes / 1048576.0; | |
| 398 } | |
| 399 | |
| 400 static double PagesToMB(uint64_t pages) { | 355 static double PagesToMB(uint64_t pages) { |
| 401 return (pages << kPageShift) / 1048576.0; | 356 return (pages << kPageShift) / 1048576.0; |
| 402 } | 357 } |
| 403 | 358 |
| 404 void PageHeap::Dump(TCMalloc_Printer* out) { | 359 void PageHeap::Dump(TCMalloc_Printer* out) { |
| 405 int nonempty_sizes = 0; | 360 int nonempty_sizes = 0; |
| 406 for (int s = 0; s < kMaxPages; s++) { | 361 for (int s = 0; s < kMaxPages; s++) { |
| 407 if (!DLL_IsEmpty(&free_[s].normal) || !DLL_IsEmpty(&free_[s].returned)) { | 362 if (!DLL_IsEmpty(&free_[s].normal) || !DLL_IsEmpty(&free_[s].returned)) { |
| 408 nonempty_sizes++; | 363 nonempty_sizes++; |
| 409 } | 364 } |
| 410 } | 365 } |
| 411 out->printf("------------------------------------------------\n"); | 366 out->printf("------------------------------------------------\n"); |
| 412 out->printf("PageHeap: %d sizes; %6.1f MB free; %6.1f MB unmapped\n", | 367 out->printf("PageHeap: %d sizes; %6.1f MB free\n", |
| 413 nonempty_sizes, MB(stats_.free_bytes), MB(stats_.unmapped_bytes)); | 368 nonempty_sizes, PagesToMB(free_pages_)); |
| 414 out->printf("------------------------------------------------\n"); | 369 out->printf("------------------------------------------------\n"); |
| 415 uint64_t total_normal = 0; | 370 uint64_t total_normal = 0; |
| 416 uint64_t total_returned = 0; | 371 uint64_t total_returned = 0; |
| 417 for (int s = 0; s < kMaxPages; s++) { | 372 for (int s = 0; s < kMaxPages; s++) { |
| 418 const int n_length = DLL_Length(&free_[s].normal); | 373 const int n_length = DLL_Length(&free_[s].normal); |
| 419 const int r_length = DLL_Length(&free_[s].returned); | 374 const int r_length = DLL_Length(&free_[s].returned); |
| 420 if (n_length + r_length > 0) { | 375 if (n_length + r_length > 0) { |
| 421 uint64_t n_pages = s * n_length; | 376 uint64_t n_pages = s * n_length; |
| 422 uint64_t r_pages = s * r_length; | 377 uint64_t r_pages = s * r_length; |
| 423 total_normal += n_pages; | 378 total_normal += n_pages; |
| (...skipping 31 matching lines...) |
| 455 total_returned += r_pages; | 410 total_returned += r_pages; |
| 456 out->printf(">255 large * %6u spans ~ %6.1f MB; %6.1f MB cum" | 411 out->printf(">255 large * %6u spans ~ %6.1f MB; %6.1f MB cum" |
| 457 "; unmapped: %6.1f MB; %6.1f MB cum\n", | 412 "; unmapped: %6.1f MB; %6.1f MB cum\n", |
| 458 (n_spans + r_spans), | 413 (n_spans + r_spans), |
| 459 PagesToMB(n_pages + r_pages), | 414 PagesToMB(n_pages + r_pages), |
| 460 PagesToMB(total_normal + total_returned), | 415 PagesToMB(total_normal + total_returned), |
| 461 PagesToMB(r_pages), | 416 PagesToMB(r_pages), |
| 462 PagesToMB(total_returned)); | 417 PagesToMB(total_returned)); |
| 463 } | 418 } |
| 464 | 419 |
| 465 bool PageHeap::GetNextRange(PageID start, base::MallocRange* r) { | |
| 466 Span* span = reinterpret_cast<Span*>(pagemap_.Next(start)); | |
| 467 if (span == NULL) { | |
| 468 return false; | |
| 469 } | |
| 470 r->address = span->start << kPageShift; | |
| 471 r->length = span->length << kPageShift; | |
| 472 r->fraction = 0; | |
| 473 switch (span->location) { | |
| 474 case Span::IN_USE: | |
| 475 r->type = base::MallocRange::INUSE; | |
| 476 r->fraction = 1; | |
| 477 if (span->sizeclass > 0) { | |
| 478 // Only some of the objects in this span may be in use. | |
| 479 const size_t osize = Static::sizemap()->class_to_size(span->sizeclass); | |
| 480 r->fraction = (1.0 * osize * span->refcount) / r->length; | |
| 481 } | |
| 482 break; | |
| 483 case Span::ON_NORMAL_FREELIST: | |
| 484 r->type = base::MallocRange::FREE; | |
| 485 break; | |
| 486 case Span::ON_RETURNED_FREELIST: | |
| 487 r->type = base::MallocRange::UNMAPPED; | |
| 488 break; | |
| 489 default: | |
| 490 r->type = base::MallocRange::UNKNOWN; | |
| 491 break; | |
| 492 } | |
| 493 return true; | |
| 494 } | |
| 495 | |
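
In the GetNextRange() code above (present only on the old side of the diff), the fraction reported for a small-object span is simply the live-object bytes divided by the span's total bytes. A worked example with made-up numbers:

```cpp
#include <cstdio>

// Illustrates r->fraction for an IN_USE span carved into small objects.
// Numbers are invented: a 32KB span (8 pages of 4K) of 64-byte objects
// with 100 objects still live.
int main() {
  const double object_size = 64.0;       // class_to_size(span->sizeclass)
  const double live_objects = 100.0;     // span->refcount
  const double span_bytes = 8 * 4096.0;  // span->length << kPageShift

  double fraction = (object_size * live_objects) / span_bytes;
  printf("fraction = %.3f\n", fraction);  // 6400 / 32768 ~= 0.195
  return 0;
}
```
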
| 496 static void RecordGrowth(size_t growth) { | 420 static void RecordGrowth(size_t growth) { |
| 497 StackTrace* t = Static::stacktrace_allocator()->New(); | 421 StackTrace* t = Static::stacktrace_allocator()->New(); |
| 498 t->depth = GetStackTrace(t->stack, kMaxStackDepth-1, 3); | 422 t->depth = GetStackTrace(t->stack, kMaxStackDepth-1, 3); |
| 499 t->size = growth; | 423 t->size = growth; |
| 500 t->stack[kMaxStackDepth-1] = reinterpret_cast<void*>(Static::growth_stacks()); | 424 t->stack[kMaxStackDepth-1] = reinterpret_cast<void*>(Static::growth_stacks()); |
| 501 Static::set_growth_stacks(t); | 425 Static::set_growth_stacks(t); |
| 502 } | 426 } |
| 503 | 427 |
| 504 bool PageHeap::GrowHeap(Length n) { | 428 bool PageHeap::GrowHeap(Length n) { |
| 505 ASSERT(kMaxPages >= kMinSystemAlloc); | 429 ASSERT(kMaxPages >= kMinSystemAlloc); |
| 506 if (n > kMaxValidPages) return false; | 430 if (n > kMaxValidPages) return false; |
| 507 Length ask = (n>kMinSystemAlloc) ? n : static_cast<Length>(kMinSystemAlloc); | 431 Length ask = (n>kMinSystemAlloc) ? n : static_cast<Length>(kMinSystemAlloc); |
| 508 size_t actual_size; | 432 size_t actual_size; |
| 509 void* ptr = TCMalloc_SystemAlloc(ask << kPageShift, &actual_size, kPageSize); | 433 void* ptr = TCMalloc_SystemAlloc(ask << kPageShift, &actual_size, kPageSize); |
| 510 if (ptr == NULL) { | 434 if (ptr == NULL) { |
| 511 if (n < ask) { | 435 if (n < ask) { |
| 512 // Try growing just "n" pages | 436 // Try growing just "n" pages |
| 513 ask = n; | 437 ask = n; |
| 514 ptr = TCMalloc_SystemAlloc(ask << kPageShift, &actual_size, kPageSize); | 438 ptr = TCMalloc_SystemAlloc(ask << kPageShift, &actual_size, kPageSize); |
| 515 } | 439 } |
| 516 if (ptr == NULL) return false; | 440 if (ptr == NULL) return false; |
| 517 } | 441 } |
| 518 ask = actual_size >> kPageShift; | 442 ask = actual_size >> kPageShift; |
| 519 RecordGrowth(ask << kPageShift); | 443 RecordGrowth(ask << kPageShift); |
| 520 | 444 |
| 521 uint64_t old_system_bytes = stats_.system_bytes; | 445 uint64_t old_system_bytes = system_bytes_; |
| 522 stats_.system_bytes += (ask << kPageShift); | 446 system_bytes_ += (ask << kPageShift); |
| 523 stats_.committed_bytes += (ask << kPageShift); | 447 committed_bytes_ += (ask << kPageShift); |
| 524 const PageID p = reinterpret_cast<uintptr_t>(ptr) >> kPageShift; | 448 const PageID p = reinterpret_cast<uintptr_t>(ptr) >> kPageShift; |
| 525 ASSERT(p > 0); | 449 ASSERT(p > 0); |
| 526 | 450 |
| 527 // If we already have a lot of pages allocated, just preallocate a bunch of | 451 // If we already have a lot of pages allocated, just preallocate a bunch of |
| 528 // memory for the page map. This prevents fragmentation by pagemap metadata | 452 // memory for the page map. This prevents fragmentation by pagemap metadata |
| 529 // when a program keeps allocating and freeing large blocks. | 453 // when a program keeps allocating and freeing large blocks. |
| 530 | 454 |
| 531 if (old_system_bytes < kPageMapBigAllocationThreshold | 455 if (old_system_bytes < kPageMapBigAllocationThreshold |
| 532 && stats_.system_bytes >= kPageMapBigAllocationThreshold) { | 456 && system_bytes_ >= kPageMapBigAllocationThreshold) { |
| 533 pagemap_.PreallocateMoreMemory(); | 457 pagemap_.PreallocateMoreMemory(); |
| 534 } | 458 } |
| 535 | 459 |
| 536 // Make sure pagemap_ has entries for all of the new pages. | 460 // Make sure pagemap_ has entries for all of the new pages. |
| 537 // Plus ensure one before and one after so coalescing code | 461 // Plus ensure one before and one after so coalescing code |
| 538 // does not need bounds-checking. | 462 // does not need bounds-checking. |
| 539 if (pagemap_.Ensure(p-1, ask+2)) { | 463 if (pagemap_.Ensure(p-1, ask+2)) { |
| 540 // Pretend the new area is allocated and then Delete() it to cause | 464 // Pretend the new area is allocated and then Delete() it to |
| 541 // any necessary coalescing to occur. | 465 // cause any necessary coalescing to occur. |
| 466 // |
| 467 // We do not adjust free_pages_ here since Delete() will do it for us. |
| 542 Span* span = NewSpan(p, ask); | 468 Span* span = NewSpan(p, ask); |
| 543 RecordSpan(span); | 469 RecordSpan(span); |
| 544 Delete(span); | 470 Delete(span); |
| 545 ASSERT(stats_.unmapped_bytes+ stats_.committed_bytes==stats_.system_bytes); | |
| 546 ASSERT(Check()); | 471 ASSERT(Check()); |
| 547 return true; | 472 return true; |
| 548 } else { | 473 } else { |
| 549 // We could not allocate memory within "pagemap_" | 474 // We could not allocate memory within "pagemap_" |
| 550 // TODO: Once we can return memory to the system, return the new span | 475 // TODO: Once we can return memory to the system, return the new span |
| 551 return false; | 476 return false; |
| 552 } | 477 } |
| 553 } | 478 } |
| 554 | 479 |
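
The Ensure(p-1, ask+2) call in GrowHeap() above sizes the pagemap so that Delete()'s coalescing lookups at the page before and the page after the new span never fall outside mapped entries. A small standalone check of that arithmetic (p and ask values are illustrative):

```cpp
#include <cstdint>
#include <cstdio>

int main() {
  uint64_t p = 1000;   // first page of the newly grown region
  uint64_t ask = 256;  // pages actually obtained from the system

  // Delete() on the span [p, p+ask) reads GetDescriptor(p-1) and
  // GetDescriptor(p+ask), so entries must exist for [p-1, p+ask] inclusive:
  uint64_t first_needed = p - 1;
  uint64_t last_needed = p + ask;
  uint64_t count = last_needed - first_needed + 1;  // == ask + 2

  printf("Ensure(%llu, %llu)  // i.e. Ensure(p-1, ask+2)\n",
         (unsigned long long)first_needed, (unsigned long long)count);
  return 0;
}
```
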
| 555 bool PageHeap::Check() { | 480 bool PageHeap::Check() { |
| (...skipping 18 matching lines...) |
| 574 for (Span* s = list->next; s != list; s = s->next) { | 499 for (Span* s = list->next; s != list; s = s->next) { |
| 575 CHECK_CONDITION(s->location == freelist); // NORMAL or RETURNED | 500 CHECK_CONDITION(s->location == freelist); // NORMAL or RETURNED |
| 576 CHECK_CONDITION(s->length >= min_pages); | 501 CHECK_CONDITION(s->length >= min_pages); |
| 577 CHECK_CONDITION(s->length <= max_pages); | 502 CHECK_CONDITION(s->length <= max_pages); |
| 578 CHECK_CONDITION(GetDescriptor(s->start) == s); | 503 CHECK_CONDITION(GetDescriptor(s->start) == s); |
| 579 CHECK_CONDITION(GetDescriptor(s->start+s->length-1) == s); | 504 CHECK_CONDITION(GetDescriptor(s->start+s->length-1) == s); |
| 580 } | 505 } |
| 581 return true; | 506 return true; |
| 582 } | 507 } |
| 583 | 508 |
| 509 void PageHeap::ReleaseFreeList(Span* list, Span* returned) { |
| 510 // Walk backwards through list so that when we push these |
| 511 // spans on the "returned" list, we preserve the order. |
| 512 while (!DLL_IsEmpty(list)) { |
| 513 Span* s = list->prev; |
| 514 DLL_Remove(s); |
| 515 DLL_Prepend(returned, s); |
| 516 ASSERT(s->location == Span::ON_NORMAL_FREELIST); |
| 517 s->location = Span::ON_RETURNED_FREELIST; |
| 518 DecommitSpan(s); |
| 519 } |
| 520 } |
| 521 |
| 522 void PageHeap::ReleaseFreePages() { |
| 523 for (Length s = 0; s < kMaxPages; s++) { |
| 524 ReleaseFreeList(&free_[s].normal, &free_[s].returned); |
| 525 } |
| 526 ReleaseFreeList(&large_.normal, &large_.returned); |
| 527 ASSERT(Check()); |
| 528 } |
| 529 |
| 584 } // namespace tcmalloc | 530 } // namespace tcmalloc |