Chromium Code Reviews

Unified Diff: third_party/tcmalloc/page_heap.cc

Issue 251065: Tracks the amount of committed vs uncommitted memory in tcmalloc's page heap's freelists (Closed)
Patch Set: whitespace Created 11 years, 2 months ago
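
In rough outline, the patch keeps a committed_bytes_ counter alongside system_bytes_: it grows whenever the heap grows or a span is committed, and shrinks whenever a span is decommitted, so the uncommitted amount is what remains of system_bytes_. A minimal standalone sketch of that bookkeeping (illustrative only; the struct and method names below are simplified stand-ins, not the real PageHeap interface):

// Simplified, standalone model (not the real PageHeap) of the bookkeeping
// this patch adds. Sizes and method signatures are hypothetical.
#include <cstdint>
#include <cassert>

struct PageHeapModel {
  uint64_t system_bytes = 0;     // everything ever obtained from the system
  uint64_t committed_bytes = 0;  // portion currently backed by committed pages

  void GrowHeap(uint64_t bytes)     { system_bytes += bytes; committed_bytes += bytes; }
  void CommitSpan(uint64_t bytes)   { committed_bytes += bytes; }
  void DecommitSpan(uint64_t bytes) { committed_bytes -= bytes; }
  uint64_t UncommittedBytes() const { return system_bytes - committed_bytes; }
};

int main() {
  PageHeapModel heap;
  heap.GrowHeap(1 << 20);        // 1 MiB from the system, committed on arrival
  heap.DecommitSpan(256 << 10);  // scavenger returns 256 KiB to the OS
  assert(heap.UncommittedBytes() == (256 << 10));
  heap.CommitSpan(256 << 10);    // span is reused from the returned freelist
  assert(heap.UncommittedBytes() == 0);
  return 0;
}
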
 // Copyright (c) 2008, Google Inc.
 // All rights reserved.
 //
 // Redistribution and use in source and binary forms, with or without
 // modification, are permitted provided that the following conditions are
 // met:
 //
 //     * Redistributions of source code must retain the above copyright
 // notice, this list of conditions and the following disclaimer.
 //     * Redistributions in binary form must reproduce the above
(...skipping 33 matching lines...)
44 "to return memory slower. Reasonable rates are in the " 44 "to return memory slower. Reasonable rates are in the "
45 "range [0,10]"); 45 "range [0,10]");
46 46
47 namespace tcmalloc { 47 namespace tcmalloc {
48 48
49 PageHeap::PageHeap() 49 PageHeap::PageHeap()
50 : pagemap_(MetaDataAlloc), 50 : pagemap_(MetaDataAlloc),
51 pagemap_cache_(0), 51 pagemap_cache_(0),
52 free_pages_(0), 52 free_pages_(0),
53 system_bytes_(0), 53 system_bytes_(0),
54 committed_bytes_(0),
54 scavenge_counter_(0), 55 scavenge_counter_(0),
55 // Start scavenging at kMaxPages list 56 // Start scavenging at kMaxPages list
56 scavenge_index_(kMaxPages-1) { 57 scavenge_index_(kMaxPages-1) {
57 COMPILE_ASSERT(kNumClasses <= (1 << PageMapCache::kValuebits), valuebits); 58 COMPILE_ASSERT(kNumClasses <= (1 << PageMapCache::kValuebits), valuebits);
58 DLL_Init(&large_.normal); 59 DLL_Init(&large_.normal);
59 DLL_Init(&large_.returned); 60 DLL_Init(&large_.returned);
60 for (int i = 0; i < kMaxPages; i++) { 61 for (int i = 0; i < kMaxPages; i++) {
61 DLL_Init(&free_[i].normal); 62 DLL_Init(&free_[i].normal);
62 DLL_Init(&free_[i].returned); 63 DLL_Init(&free_[i].returned);
63 } 64 }
(...skipping 82 matching lines...)
   RecordSpan(leftover);
   pagemap_.set(span->start + n - 1, span);  // Update map from pageid to span
   span->length = n;

   return leftover;
 }

 void PageHeap::CommitSpan(Span* span) {
   TCMalloc_SystemCommit(reinterpret_cast<void*>(span->start << kPageShift),
                         static_cast<size_t>(span->length << kPageShift));
+  committed_bytes_ += span->length << kPageShift;
 }

 void PageHeap::DecommitSpan(Span* span) {
   TCMalloc_SystemRelease(reinterpret_cast<void*>(span->start << kPageShift),
                          static_cast<size_t>(span->length << kPageShift));
+  committed_bytes_ -= span->length << kPageShift;
 }

 Span* PageHeap::Carve(Span* span, Length n) {
   ASSERT(n > 0);
   ASSERT(span->location != Span::IN_USE);
   const int old_location = span->location;
   DLL_Remove(span);
   span->location = Span::IN_USE;
   Event(span, 'A', n);

(...skipping 55 matching lines...)
   // TODO(jar): We need a better strategy for deciding to commit, or decommit,
   // based on memory usage and free heap sizes.

   const PageID p = span->start;
   const Length n = span->length;
   Span* prev = GetDescriptor(p-1);
   if (prev != NULL && prev->location != Span::IN_USE) {
     // Merge preceding span into this span
     ASSERT(prev->start + prev->length == p);
     const Length len = prev->length;
+    if (prev->location == Span::ON_RETURNED_FREELIST) {
+      // We're about to put the merged span onto the returned freelist and call
+      // DecommitSpan() on it, which will mark the entire span, including this
+      // one, as released and decrease committed_bytes_ by the size of the
+      // merged span. To make the math work out, we temporarily increase
+      // committed_bytes_ by that amount here.
+      committed_bytes_ += prev->length << kPageShift;
+    }
     DLL_Remove(prev);
     DeleteSpan(prev);
     span->start -= len;
     span->length += len;
     pagemap_.set(span->start, span);
     Event(span, 'L', len);
   }
   Span* next = GetDescriptor(p+n);
   if (next != NULL && next->location != Span::IN_USE) {
     // Merge next span into this span
     ASSERT(next->start == p+n);
     const Length len = next->length;
+    if (next->location == Span::ON_RETURNED_FREELIST) {
+      // See the comment in the 'if (prev->location ...)' block above for an
+      // explanation.
+      committed_bytes_ += next->length << kPageShift;
+    }
     DLL_Remove(next);
     DeleteSpan(next);
     span->length += len;
     pagemap_.set(span->start + span->length - 1, span);
     Event(span, 'R', len);
   }

   Event(span, 'D', span->length);
   span->location = Span::ON_RETURNED_FREELIST;
   DecommitSpan(span);
(...skipping 34 matching lines...)
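
The temporary committed_bytes_ adjustment in the merge hunk above is easiest to verify as arithmetic. A small standalone sketch, using hypothetical page counts and assuming DecommitSpan() subtracts the full merged length as it does in this file:

// Standalone arithmetic check (not part of the patch) of the temporary
// committed_bytes_ bump when merging an already-decommitted neighbor.
#include <cstdint>
#include <cassert>

int main() {
  const uint64_t kPageSize = 4096;           // assumed page size
  const uint64_t prev_pages = 6;             // neighbor on the returned freelist
  const uint64_t span_pages = 10;            // span being freed (still committed)
  uint64_t committed_bytes = span_pages * kPageSize;

  // Patch behaviour: pretend the decommitted neighbor were committed...
  committed_bytes += prev_pages * kPageSize;
  // ...then DecommitSpan() on the merged span subtracts the whole merged size.
  committed_bytes -= (prev_pages + span_pages) * kPageSize;

  // Net effect: only the pages that were actually committed get subtracted.
  assert(committed_bytes == 0);
  return 0;
}
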
   // Find index of free list to scavenge
   int index = scavenge_index_ + 1;
   for (int i = 0; i < kMaxPages+1; i++) {
     if (index > kMaxPages) index = 0;
     SpanList* slist = (index == kMaxPages) ? &large_ : &free_[index];
     if (!DLL_IsEmpty(&slist->normal)) {
       // Release the last span on the normal portion of this list
       Span* s = slist->normal.prev;
       ASSERT(s->location == Span::ON_NORMAL_FREELIST);
       DLL_Remove(s);
-      TCMalloc_SystemRelease(reinterpret_cast<void*>(s->start << kPageShift),
-                             static_cast<size_t>(s->length << kPageShift));
+      DecommitSpan(s);
       s->location = Span::ON_RETURNED_FREELIST;
       DLL_Prepend(&slist->returned, s);

       // Compute how long to wait until we return memory.
       // FLAGS_tcmalloc_release_rate==1 means wait for 1000 pages
       // after releasing one page.
       const double mult = 1000.0 / rate;
       double wait = mult * static_cast<double>(s->length);
       if (wait > kMaxReleaseDelay) {
         // Avoid overflow and bound to reasonable range
(...skipping 109 matching lines...)
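
A quick numeric check of the release-rate comment in the hunk above, using hypothetical values for FLAGS_tcmalloc_release_rate and the span length:

// wait = (1000.0 / FLAGS_tcmalloc_release_rate) * pages_released
#include <cassert>

int main() {
  const double rate = 10.0;           // assumed FLAGS_tcmalloc_release_rate
  const double pages_released = 5.0;  // length of the span just returned
  const double wait = (1000.0 / rate) * pages_released;
  assert(wait == 500.0);  // scavenge again after ~500 more freed pages
  return 0;
}
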
       ask = n;
       ptr = TCMalloc_SystemAlloc(ask << kPageShift, &actual_size, kPageSize);
     }
     if (ptr == NULL) return false;
   }
   ask = actual_size >> kPageShift;
   RecordGrowth(ask << kPageShift);

   uint64_t old_system_bytes = system_bytes_;
   system_bytes_ += (ask << kPageShift);
+  committed_bytes_ += (ask << kPageShift);
   const PageID p = reinterpret_cast<uintptr_t>(ptr) >> kPageShift;
   ASSERT(p > 0);

   // If we already have a lot of pages allocated, just preallocate a bunch of
   // memory for the page map. This prevents fragmentation by pagemap metadata
   // when a program keeps allocating and freeing large blocks.

   if (old_system_bytes < kPageMapBigAllocationThreshold
       && system_bytes_ >= kPageMapBigAllocationThreshold) {
     pagemap_.PreallocateMoreMemory();
(...skipping 41 matching lines...)
   for (Span* s = list->next; s != list; s = s->next) {
     CHECK_CONDITION(s->location == freelist);  // NORMAL or RETURNED
     CHECK_CONDITION(s->length >= min_pages);
     CHECK_CONDITION(s->length <= max_pages);
     CHECK_CONDITION(GetDescriptor(s->start) == s);
     CHECK_CONDITION(GetDescriptor(s->start+s->length-1) == s);
   }
   return true;
 }

-static void ReleaseFreeList(Span* list, Span* returned) {
+void PageHeap::ReleaseFreeList(Span* list, Span* returned) {
   // Walk backwards through list so that when we push these
   // spans on the "returned" list, we preserve the order.
   while (!DLL_IsEmpty(list)) {
     Span* s = list->prev;
     DLL_Remove(s);
     DLL_Prepend(returned, s);
     ASSERT(s->location == Span::ON_NORMAL_FREELIST);
     s->location = Span::ON_RETURNED_FREELIST;
-    TCMalloc_SystemRelease(reinterpret_cast<void*>(s->start << kPageShift),
-                           static_cast<size_t>(s->length << kPageShift));
+    DecommitSpan(s);
   }
 }

 void PageHeap::ReleaseFreePages() {
   for (Length s = 0; s < kMaxPages; s++) {
     ReleaseFreeList(&free_[s].normal, &free_[s].returned);
   }
   ReleaseFreeList(&large_.normal, &large_.returned);
   ASSERT(Check());
 }

 }  // namespace tcmalloc