Chromium Code Reviews

Unified Diff: third_party/tcmalloc/chromium/src/page_heap.cc

Issue 576001: Merged third_party/tcmalloc/vendor/src (google-perftools r87) into... (Closed) Base URL: svn://svn.chromium.org/chrome/trunk/src/
Patch Set: Removed the unnecessary printf and ASSERT(0) | Created 10 years, 9 months ago
Index: third_party/tcmalloc/chromium/src/page_heap.cc
===================================================================
--- third_party/tcmalloc/chromium/src/page_heap.cc (revision 41942)
+++ third_party/tcmalloc/chromium/src/page_heap.cc (working copy)
@@ -49,12 +49,9 @@
PageHeap::PageHeap()
: pagemap_(MetaDataAlloc),
pagemap_cache_(0),
- free_pages_(0),
- system_bytes_(0),
- committed_bytes_(0),
scavenge_counter_(0),
// Start scavenging at kMaxPages list
- scavenge_index_(kMaxPages-1) {
+ release_index_(kMaxPages) {
COMPILE_ASSERT(kNumClasses <= (1 << PageMapCache::kValuebits), valuebits);
DLL_Init(&large_.normal);
DLL_Init(&large_.returned);
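Note for readers: the three counters removed above are consolidated into a single stats_ member whose declaration lives in page_heap.h, not shown in this file's diff. A minimal sketch of what that struct presumably looks like, given the fields referenced throughout this patch:

    // Hypothetical reconstruction -- the real declaration is in page_heap.h.
    struct Stats {
      uint64_t system_bytes;     // total virtual address space obtained from the OS
      uint64_t committed_bytes;  // bytes currently backed by physical memory
      uint64_t free_bytes;       // committed bytes sitting on the normal freelists
      uint64_t unmapped_bytes;   // decommitted bytes on the returned freelists
    };

The ASSERTs added throughout this patch check that these stay consistent: every system byte is either committed or unmapped.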
@@ -90,6 +87,7 @@
// Grow the heap and try again
if (!GrowHeap(n)) {
+ ASSERT(stats_.unmapped_bytes + stats_.committed_bytes == stats_.system_bytes);
ASSERT(Check());
return NULL;
}
@@ -154,20 +152,20 @@
void PageHeap::CommitSpan(Span* span) {
TCMalloc_SystemCommit(reinterpret_cast<void*>(span->start << kPageShift),
static_cast<size_t>(span->length << kPageShift));
- committed_bytes_ += span->length << kPageShift;
+ stats_.committed_bytes += span->length << kPageShift;
}
void PageHeap::DecommitSpan(Span* span) {
TCMalloc_SystemRelease(reinterpret_cast<void*>(span->start << kPageShift),
static_cast<size_t>(span->length << kPageShift));
- committed_bytes_ -= span->length << kPageShift;
+ stats_.committed_bytes -= span->length << kPageShift;
}
Span* PageHeap::Carve(Span* span, Length n) {
ASSERT(n > 0);
ASSERT(span->location != Span::IN_USE);
const int old_location = span->location;
- DLL_Remove(span);
+ RemoveFromFreeList(span);
span->location = Span::IN_USE;
Event(span, 'A', n);
@@ -179,23 +177,28 @@
Event(leftover, 'S', extra);
RecordSpan(leftover);
- // Place leftover span on appropriate free list
- SpanList* listpair = (extra < kMaxPages) ? &free_[extra] : &large_;
- Span* dst = (leftover->location == Span::ON_RETURNED_FREELIST
- ? &listpair->returned : &listpair->normal);
- DLL_Prepend(dst, leftover);
+ // The previous span of |leftover| was just split off -- no need to
+ // coalesce them. The next span of |leftover| was not previously coalesced
+ // with |span|, i.e. it is NULL or has a location other than |old_location|.
+ const PageID p = leftover->start;
+ const Length len = leftover->length;
+ Span* next = GetDescriptor(p+len);
+ ASSERT(next == NULL ||
+ next->location == Span::IN_USE ||
+ next->location != leftover->location);
+ PrependToFreeList(leftover);  // Skip coalescing -- no candidates possible
span->length = n;
pagemap_.set(span->start + n - 1, span);
}
ASSERT(Check());
- free_pages_ -= n;
if (old_location == Span::ON_RETURNED_FREELIST) {
// We need to recommit this address space.
CommitSpan(span);
}
ASSERT(span->location == Span::IN_USE);
ASSERT(span->length == n);
+ ASSERT(stats_.unmapped_bytes + stats_.committed_bytes == stats_.system_bytes);
return span;
}
@@ -205,9 +208,20 @@
ASSERT(span->length > 0);
ASSERT(GetDescriptor(span->start) == span);
ASSERT(GetDescriptor(span->start + span->length - 1) == span);
+ const Length n = span->length;
span->sizeclass = 0;
span->sample = 0;
+ span->location = Span::ON_NORMAL_FREELIST;
+ Event(span, 'D', span->length);
+ MergeIntoFreeList(span); // Coalesces if possible
+ IncrementalScavenge(n);
+ ASSERT(stats_.unmapped_bytes + stats_.committed_bytes == stats_.system_bytes);
+ ASSERT(Check());
+}
+void PageHeap::MergeIntoFreeList(Span* span) {
+ ASSERT(span->location != Span::IN_USE);
+
// Coalesce -- we guarantee that "p" != 0, so no bounds checking
// necessary. We do not bother resetting the stale pagemap
// entries for the pieces we are merging together because we only
@@ -239,12 +253,12 @@
if (prev->location == Span::ON_RETURNED_FREELIST) {
// We're about to put the merge span into the returned freelist and call
// DecommitSpan() on it, which will mark the entire span including this
- // one as released and decrease committed_bytes_ by the size of the
+ // one as released and decrease stats_.committed_bytes by the size of the
// merged span. To make the math work out we temporarily increase the
- // committed_bytes_ amount.
- committed_bytes_ += prev->length << kPageShift;
+ // stats_.committed_bytes amount.
+ stats_.committed_bytes += prev->length << kPageShift;
}
- DLL_Remove(prev);
+ RemoveFromFreeList(prev);
DeleteSpan(prev);
span->start -= len;
span->length += len;
@@ -258,9 +272,9 @@
const Length len = next->length;
if (next->location == Span::ON_RETURNED_FREELIST) {
// See the comment inside 'if (prev->location ...)' above for explanation.
- committed_bytes_ += next->length << kPageShift;
+ stats_.committed_bytes += next->length << kPageShift;
}
- DLL_Remove(next);
+ RemoveFromFreeList(next);
DeleteSpan(next);
span->length += len;
pagemap_.set(span->start + span->length - 1, span);
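A worked example of the merge arithmetic above, with hypothetical page numbers:

    // Suppose span covers pages [100, 104) and prev covers [96, 100), both
    // free.  After RemoveFromFreeList(prev) and DeleteSpan(prev):
    //   span->start  = 100 - 4 = 96
    //   span->length = 4  + 4  = 8
    // The next-merge is symmetric: span->length grows by next->length, and
    // the pagemap entry for the span's new last page is pointed back at span.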
@@ -270,15 +284,29 @@
Event(span, 'D', span->length);
span->location = Span::ON_RETURNED_FREELIST;
DecommitSpan(span);
- if (span->length < kMaxPages) {
- DLL_Prepend(&free_[span->length].returned, span);
+ PrependToFreeList(span);
+}
+
+void PageHeap::PrependToFreeList(Span* span) {
+ ASSERT(span->location != Span::IN_USE);
+ SpanList* list = (span->length < kMaxPages) ? &free_[span->length] : &large_;
+ if (span->location == Span::ON_NORMAL_FREELIST) {
+ stats_.free_bytes += (span->length << kPageShift);
+ DLL_Prepend(&list->normal, span);
} else {
- DLL_Prepend(&large_.returned, span);
+ stats_.unmapped_bytes += (span->length << kPageShift);
+ DLL_Prepend(&list->returned, span);
}
- free_pages_ += n;
+}
- IncrementalScavenge(n);
- ASSERT(Check());
+void PageHeap::RemoveFromFreeList(Span* span) {
+ ASSERT(span->location != Span::IN_USE);
+ if (span->location == Span::ON_NORMAL_FREELIST) {
+ stats_.free_bytes -= (span->length << kPageShift);
+ } else {
+ stats_.unmapped_bytes -= (span->length << kPageShift);
+ }
+ DLL_Remove(span);
}
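Since PrependToFreeList and RemoveFromFreeList are now the only paths that touch stats_.free_bytes and stats_.unmapped_bytes, the bookkeeping invariant can be stated in one place. A hypothetical debug helper (not part of this patch) that captures what the repeated ASSERTs check:

    // Hypothetical helper, mirroring the ASSERTs sprinkled through this file.
    void PageHeap::CheckStats() const {
      // Every byte obtained from the system is either committed or unmapped.
      ASSERT(stats_.unmapped_bytes + stats_.committed_bytes == stats_.system_bytes);
      // Bytes on the normal freelists are a subset of committed bytes.
      ASSERT(stats_.free_bytes <= stats_.committed_bytes);
    }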
void PageHeap::IncrementalScavenge(Length n) {
@@ -286,17 +314,6 @@
scavenge_counter_ -= n;
if (scavenge_counter_ >= 0) return; // Not yet time to scavenge
- // Never delay scavenging for more than the following number of
- // deallocated pages. With 4K pages, this comes to 4GB of
- // deallocation.
- // Chrome: Changed to 64MB
- static const int kMaxReleaseDelay = 1 << 14;
-
- // If there is nothing to release, wait for so many pages before
- // scavenging again. With 4K pages, this comes to 1GB of memory.
- // Chrome: Changed to 16MB
- static const int kDefaultReleaseDelay = 1 << 12;
-
const double rate = FLAGS_tcmalloc_release_rate;
if (rate <= 1e-6) {
// Tiny release rate means that releasing is disabled.
@@ -304,42 +321,64 @@
return;
}
- // Find index of free list to scavenge
- int index = scavenge_index_ + 1;
- for (int i = 0; i < kMaxPages+1; i++) {
- if (index > kMaxPages) index = 0;
- SpanList* slist = (index == kMaxPages) ? &large_ : &free_[index];
- if (!DLL_IsEmpty(&slist->normal)) {
- // Release the last span on the normal portion of this list
- Span* s = slist->normal.prev;
- ASSERT(s->location == Span::ON_NORMAL_FREELIST);
- DLL_Remove(s);
- DecommitSpan(s);
- s->location = Span::ON_RETURNED_FREELIST;
- DLL_Prepend(&slist->returned, s);
+ Length released_pages = ReleaseAtLeastNPages(1);
- // Compute how long to wait until we return memory.
- // FLAGS_tcmalloc_release_rate==1 means wait for 1000 pages
- // after releasing one page.
- const double mult = 1000.0 / rate;
- double wait = mult * static_cast<double>(s->length);
- if (wait > kMaxReleaseDelay) {
- // Avoid overflow and bound to reasonable range
- wait = kMaxReleaseDelay;
- }
- scavenge_counter_ = static_cast<int64_t>(wait);
-
- scavenge_index_ = index; // Scavenge at index+1 next time
- // Note: we stop scavenging after finding one.
- return;
+ if (released_pages == 0) {
+ // Nothing to scavenge, delay for a while.
+ scavenge_counter_ = kDefaultReleaseDelay;
+ } else {
+ // Compute how long to wait until we return memory.
+ // FLAGS_tcmalloc_release_rate==1 means wait for 1000 pages
+ // after releasing one page.
+ const double mult = 1000.0 / rate;
+ double wait = mult * static_cast<double>(released_pages);
+ if (wait > kMaxReleaseDelay) {
+ // Avoid overflow and bound to reasonable range.
+ wait = kMaxReleaseDelay;
}
- index++;
+ scavenge_counter_ = static_cast<int64_t>(wait);
}
+}
- // Nothing to scavenge, delay for a while
- scavenge_counter_ = kDefaultReleaseDelay;
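To make the release-rate arithmetic concrete: kDefaultReleaseDelay and kMaxReleaseDelay are still referenced after their removal above, so they have presumably moved to page_heap.h with the same Chrome-specific values (1 << 12 and 1 << 14 pages). A standalone sketch of the delay computation:

    // Hypothetical standalone version of the computation above.
    // rate == 1.0 means: after releasing one page, wait for 1000 more
    // deallocated pages before scavenging again.
    int64_t ScavengeDelay(double rate, Length released_pages) {
      const double mult = 1000.0 / rate;
      double wait = mult * static_cast<double>(released_pages);
      if (wait > (1 << 14))  // kMaxReleaseDelay: 64MB worth of 4K pages
        wait = 1 << 14;
      return static_cast<int64_t>(wait);
    }
    // ScavengeDelay(1.0, 1) == 1000; ScavengeDelay(1.0, 32) == 16384 (capped).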
+Length PageHeap::ReleaseLastNormalSpan(SpanList* slist) {
+ Span* s = slist->normal.prev;
+ ASSERT(s->location == Span::ON_NORMAL_FREELIST);
+ RemoveFromFreeList(s);
+ const Length n = s->length;
+ TCMalloc_SystemRelease(reinterpret_cast<void*>(s->start << kPageShift),
+ static_cast<size_t>(s->length << kPageShift));
+ s->location = Span::ON_RETURNED_FREELIST;
+ MergeIntoFreeList(s); // Coalesces if possible.
+ return n;
}
+Length PageHeap::ReleaseAtLeastNPages(Length num_pages) {
+ Length released_pages = 0;
+ Length prev_released_pages = -1;
+
+ // Round robin through the lists of free spans, releasing the last
+ // span in each list. Stop after releasing at least num_pages.
+ while (released_pages < num_pages) {
+ if (released_pages == prev_released_pages) {
+ // Last iteration of while loop made no progress.
+ break;
+ }
+ prev_released_pages = released_pages;
+
+ for (int i = 0; i < kMaxPages+1 && released_pages < num_pages;
+ i++, release_index_++) {
+ if (release_index_ > kMaxPages) release_index_ = 0;
+ SpanList* slist = (release_index_ == kMaxPages) ?
+ &large_ : &free_[release_index_];
+ if (!DLL_IsEmpty(&slist->normal)) {
+ Length released_len = ReleaseLastNormalSpan(slist);
+ released_pages += released_len;
+ }
+ }
+ }
+ return released_pages;
+}
+
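A usage sketch for the new public entry point (hypothetical caller; assumes 4K pages, i.e. kPageShift == 12, and the Static::pageheap() accessor used elsewhere in tcmalloc):

    // Hypothetical caller: ask the heap to return at least 64MB to the OS.
    // The return value is the number of pages actually released; it can
    // fall short of the request once the normal freelists run dry.
    const Length kPagesToRelease = (64u << 20) >> kPageShift;  // 16384 pages
    Length released = Static::pageheap()->ReleaseAtLeastNPages(kPagesToRelease);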
void PageHeap::RegisterSizeClass(Span* span, size_t sc) {
// Associate span object with all interior pages as well
ASSERT(span->location == Span::IN_USE);
@@ -352,6 +391,10 @@
}
}
+static double MB(uint64_t bytes) {
+ return bytes / 1048576.0;
+}
+
static double PagesToMB(uint64_t pages) {
return (pages << kPageShift) / 1048576.0;
}
@@ -364,8 +407,8 @@
}
}
out->printf("------------------------------------------------\n");
- out->printf("PageHeap: %d sizes; %6.1f MB free\n",
- nonempty_sizes, PagesToMB(free_pages_));
+ out->printf("PageHeap: %d sizes; %6.1f MB free; %6.1f MB unmapped\n",
+ nonempty_sizes, MB(stats_.free_bytes), MB(stats_.unmapped_bytes));
out->printf("------------------------------------------------\n");
uint64_t total_normal = 0;
uint64_t total_returned = 0;
@@ -417,6 +460,37 @@
PagesToMB(total_returned));
}
+bool PageHeap::GetNextRange(PageID start, base::MallocRange* r) {
+ Span* span = reinterpret_cast<Span*>(pagemap_.Next(start));
+ if (span == NULL) {
+ return false;
+ }
+ r->address = span->start << kPageShift;
+ r->length = span->length << kPageShift;
+ r->fraction = 0;
+ switch (span->location) {
+ case Span::IN_USE:
+ r->type = base::MallocRange::INUSE;
+ r->fraction = 1;
+ if (span->sizeclass > 0) {
+ // Only some of the objects in this span may be in use.
+ const size_t osize = Static::sizemap()->class_to_size(span->sizeclass);
+ r->fraction = (1.0 * osize * span->refcount) / r->length;
+ }
+ break;
+ case Span::ON_NORMAL_FREELIST:
+ r->type = base::MallocRange::FREE;
+ break;
+ case Span::ON_RETURNED_FREELIST:
+ r->type = base::MallocRange::UNMAPPED;
+ break;
+ default:
+ r->type = base::MallocRange::UNKNOWN;
+ break;
+ }
+ return true;
+}
+
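GetNextRange supports a linear, address-ordered walk over every span the page heap tracks. A sketch of the iteration pattern a heap inspector might use (hypothetical walker, not in this patch):

    // Hypothetical walker.  Each call fills |r| with the span at or after
    // |page| and returns false once the pagemap is exhausted.
    void WalkHeap(PageHeap* heap) {
      base::MallocRange r;
      for (PageID page = 0; heap->GetNextRange(page, &r); ) {
        // For IN_USE spans r.fraction estimates the live portion; FREE and
        // UNMAPPED spans report fraction == 0.
        page = (r.address + r.length) >> kPageShift;
      }
    }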
static void RecordGrowth(size_t growth) {
StackTrace* t = Static::stacktrace_allocator()->New();
t->depth = GetStackTrace(t->stack, kMaxStackDepth-1, 3);
@@ -442,9 +516,9 @@
ask = actual_size >> kPageShift;
RecordGrowth(ask << kPageShift);
- uint64_t old_system_bytes = system_bytes_;
- system_bytes_ += (ask << kPageShift);
- committed_bytes_ += (ask << kPageShift);
+ uint64_t old_system_bytes = stats_.system_bytes;
+ stats_.system_bytes += (ask << kPageShift);
+ stats_.committed_bytes += (ask << kPageShift);
const PageID p = reinterpret_cast<uintptr_t>(ptr) >> kPageShift;
ASSERT(p > 0);
@@ -453,7 +527,7 @@
// when a program keeps allocating and freeing large blocks.
if (old_system_bytes < kPageMapBigAllocationThreshold
- && system_bytes_ >= kPageMapBigAllocationThreshold) {
+ && stats_.system_bytes >= kPageMapBigAllocationThreshold) {
pagemap_.PreallocateMoreMemory();
}
@@ -461,13 +535,12 @@
// Plus ensure one before and one after so coalescing code
// does not need bounds-checking.
if (pagemap_.Ensure(p-1, ask+2)) {
- // Pretend the new area is allocated and then Delete() it to
- // cause any necessary coalescing to occur.
- //
- // We do not adjust free_pages_ here since Delete() will do it for us.
+ // Pretend the new area is allocated and then Delete() it to cause
+ // any necessary coalescing to occur.
Span* span = NewSpan(p, ask);
RecordSpan(span);
Delete(span);
+ ASSERT(stats_.unmapped_bytes + stats_.committed_bytes == stats_.system_bytes);
ASSERT(Check());
return true;
} else {
@@ -506,25 +579,4 @@
return true;
}
-void PageHeap::ReleaseFreeList(Span* list, Span* returned) {
- // Walk backwards through list so that when we push these
- // spans on the "returned" list, we preserve the order.
- while (!DLL_IsEmpty(list)) {
- Span* s = list->prev;
- DLL_Remove(s);
- DLL_Prepend(returned, s);
- ASSERT(s->location == Span::ON_NORMAL_FREELIST);
- s->location = Span::ON_RETURNED_FREELIST;
- DecommitSpan(s);
- }
-}
-
-void PageHeap::ReleaseFreePages() {
- for (Length s = 0; s < kMaxPages; s++) {
- ReleaseFreeList(&free_[s].normal, &free_[s].returned);
- }
- ReleaseFreeList(&large_.normal, &large_.returned);
- ASSERT(Check());
-}
-
} // namespace tcmalloc