Index: third_party/tcmalloc/chromium/src/page_heap.cc
diff --git a/third_party/tcmalloc/chromium/src/page_heap.cc b/third_party/tcmalloc/chromium/src/page_heap.cc
index 248e4622ac77ff1c54b60eb52bc82fe86553189e..83ff8922c9a186ab37cc6875e31a454c9bdfc7ea 100644
--- a/third_party/tcmalloc/chromium/src/page_heap.cc
+++ b/third_party/tcmalloc/chromium/src/page_heap.cc
@@ -100,6 +100,7 @@ Span* PageHeap::New(Length n) {
   // Grow the heap and try again.
   if (!GrowHeap(n)) {
+    ASSERT(stats_.unmapped_bytes + stats_.committed_bytes == stats_.system_bytes);
     ASSERT(Check());
     return NULL;
   }
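
The ASSERT added above is the accounting identity this patch maintains throughout PageHeap: every byte of address space obtained from the OS is tracked as either committed or unmapped. A minimal standalone model of that invariant -- hypothetical names, not tcmalloc's actual types:

    #include <cassert>
    #include <cstdint>

    struct HeapStats {
      uint64_t system_bytes = 0;     // total address space from the OS
      uint64_t committed_bytes = 0;  // currently backed by real memory
      uint64_t unmapped_bytes = 0;   // released back to the OS
    };

    // Every state change must preserve: unmapped + committed == system.
    void CheckInvariant(const HeapStats& s) {
      assert(s.unmapped_bytes + s.committed_bytes == s.system_bytes);
    }

    int main() {
      HeapStats s;
      s.system_bytes += 1 << 20;     // grow: new memory arrives committed
      s.committed_bytes += 1 << 20;
      CheckInvariant(s);
      s.committed_bytes -= 4096;     // decommit one page...
      s.unmapped_bytes += 4096;      // ...and count it as unmapped
      CheckInvariant(s);
    }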
@@ -160,6 +161,18 @@ Span* PageHeap::Split(Span* span, Length n) {
   return leftover;
 }
 
+void PageHeap::CommitSpan(Span* span) {
+  TCMalloc_SystemCommit(reinterpret_cast<void*>(span->start << kPageShift),
+                        static_cast<size_t>(span->length << kPageShift));
+  stats_.committed_bytes += span->length << kPageShift;
+}
+
+void PageHeap::DecommitSpan(Span* span) {
+  TCMalloc_SystemRelease(reinterpret_cast<void*>(span->start << kPageShift),
+                         static_cast<size_t>(span->length << kPageShift));
+  stats_.committed_bytes -= span->length << kPageShift;
+}
+
 Span* PageHeap::Carve(Span* span, Length n) {
   ASSERT(n > 0);
   ASSERT(span->location != Span::IN_USE);
@@ -175,11 +188,29 @@ Span* PageHeap::Carve(Span* span, Length n) {
     leftover->location = old_location;
     Event(leftover, 'S', extra);
     RecordSpan(leftover);
+
+    // The previous span of |leftover| was just split off from it -- no need
+    // to coalesce them. The next span of |leftover| was not previously
+    // coalesced with |span|: it is NULL or its location differs from |old_location|.
+    const PageID p = leftover->start;
+    const Length len = leftover->length;
+    Span* next = GetDescriptor(p+len);
+    ASSERT(next == NULL ||
+           next->location == Span::IN_USE ||
+           next->location != leftover->location);
+
     PrependToFreeList(leftover);  // Skip coalescing - no candidates possible
     span->length = n;
     pagemap_.set(span->start + n - 1, span);
   }
   ASSERT(Check());
+  if (old_location == Span::ON_RETURNED_FREELIST) {
+    // We need to recommit this address space.
+    CommitSpan(span);
+  }
+  ASSERT(span->location == Span::IN_USE);
+  ASSERT(span->length == n);
+  ASSERT(stats_.unmapped_bytes + stats_.committed_bytes == stats_.system_bytes);
   return span;
 }
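
CommitSpan() and DecommitSpan() above are deliberately symmetric: both convert the span's page range into an address and a byte length via kPageShift, call the system layer, and adjust stats_.committed_bytes in opposite directions. A compilable sketch of that bookkeeping, with stub functions standing in for TCMalloc_SystemCommit/TCMalloc_SystemRelease and an assumed kPageShift (tcmalloc configures its own page size):

    #include <cassert>
    #include <cstddef>
    #include <cstdint>

    static const unsigned kPageShift = 13;   // assumed: 8 KiB pages
    static uint64_t committed_bytes = 0;

    // Stand-ins for the real system calls; they are no-ops here.
    static void SystemCommit(void*, size_t) {}
    static void SystemRelease(void*, size_t) {}

    struct Span { uintptr_t start; uint64_t length; };  // length in pages

    void CommitSpan(Span* span) {
      SystemCommit(reinterpret_cast<void*>(span->start << kPageShift),
                   static_cast<size_t>(span->length << kPageShift));
      committed_bytes += span->length << kPageShift;
    }

    void DecommitSpan(Span* span) {
      SystemRelease(reinterpret_cast<void*>(span->start << kPageShift),
                    static_cast<size_t>(span->length << kPageShift));
      committed_bytes -= span->length << kPageShift;
    }

    int main() {
      Span s = {100, 4};                              // 4 pages at page 100
      CommitSpan(&s);
      assert(committed_bytes == (4u << kPageShift));  // 32 KiB committed
      DecommitSpan(&s);
      assert(committed_bytes == 0);  // a round trip restores the counter
    }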
@@ -196,6 +227,7 @@ void PageHeap::Delete(Span* span) {
   Event(span, 'D', span->length);
   MergeIntoFreeList(span);  // Coalesces if possible
   IncrementalScavenge(n);
+  ASSERT(stats_.unmapped_bytes + stats_.committed_bytes == stats_.system_bytes);
   ASSERT(Check());
 }
@@ -207,15 +239,37 @@ void PageHeap::MergeIntoFreeList(Span* span) {
   // entries for the pieces we are merging together because we only
   // care about the pagemap entries for the boundaries.
   //
-  // Note that only similar spans are merged together.  For example,
-  // we do not coalesce "returned" spans with "normal" spans.
+  // Note that the adjacent spans we merge into "span" may come out of a
+  // "normal" (committed) list, and cleanly merge with our IN_USE span, which
+  // is implicitly committed. If the adjacent spans are on the "returned"
+  // (decommitted) list, then we must get both spans into the same state
+  // before or after we coalesce them. The current code always decommits.
+  // This is achieved by blindly decommitting the entire coalesced region,
+  // which may include any combination of committed and decommitted spans,
+  // at the end of the method.
+
+  // TODO(jar): "Always decommit" causes some extra calls to commit when we
+  // are called in GrowHeap() during an allocation :-/.  We need to evaluate
+  // the cost of that oscillation, and possibly do something to reduce it.
+
+  // TODO(jar): We need a better strategy for deciding to commit, or decommit,
+  // based on memory usage and free heap sizes.
+
   const PageID p = span->start;
   const Length n = span->length;
   Span* prev = GetDescriptor(p-1);
-  if (prev != NULL && prev->location == span->location) {
+  if (prev != NULL && prev->location != Span::IN_USE) {
     // Merge preceding span into this span
     ASSERT(prev->start + prev->length == p);
     const Length len = prev->length;
+    if (prev->location == Span::ON_RETURNED_FREELIST) {
+      // We're about to put the merged span onto the returned freelist and
+      // call DecommitSpan() on it, which will mark the entire span, including
+      // this piece, as released and decrease stats_.committed_bytes by the
+      // size of the merged span. To make the math work out we temporarily
+      // increase stats_.committed_bytes by |prev|'s size.
+      stats_.committed_bytes += prev->length << kPageShift;
+    }
     RemoveFromFreeList(prev);
     DeleteSpan(prev);
     span->start -= len;
@@ -224,10 +278,14 @@ void PageHeap::MergeIntoFreeList(Span* span) {
     Event(span, 'L', len);
   }
   Span* next = GetDescriptor(p+n);
-  if (next != NULL && next->location == span->location) {
+  if (next != NULL && next->location != Span::IN_USE) {
     // Merge next span into this span
     ASSERT(next->start == p+n);
     const Length len = next->length;
+    if (next->location == Span::ON_RETURNED_FREELIST) {
+      // See the comment in the 'if (prev->location ...)' block above for
+      // explanation.
+      stats_.committed_bytes += next->length << kPageShift;
+    }
     RemoveFromFreeList(next);
     DeleteSpan(next);
     span->length += len;
@@ -235,6 +293,9 @@ void PageHeap::MergeIntoFreeList(Span* span) {
     Event(span, 'R', len);
   }
 
+  Event(span, 'D', span->length);
+  span->location = Span::ON_RETURNED_FREELIST;
+  DecommitSpan(span);
   PrependToFreeList(span);
 }
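
The temporary stats_.committed_bytes bump above is the subtle part of this hunk: the unconditional DecommitSpan() at the end subtracts the full merged length, but pages absorbed from the returned freelist were never counted as committed in the first place. A worked model with assumed page counts (not the real PageHeap code) showing why the bump makes the subtraction come out right:

    #include <cassert>
    #include <cstdint>

    static const unsigned kPageShift = 13;  // assumed page size

    int main() {
      // 5 committed pages in total: 2 in the span being freed, 3 elsewhere.
      uint64_t committed_bytes = 5u << kPageShift;

      uint64_t span_pages = 2;  // the IN_USE span being deleted (committed)
      uint64_t prev_pages = 3;  // neighbor on the returned (decommitted) list

      // Pretend the neighbor was committed, so that decommitting the whole
      // merged region subtracts exactly what the counter now claims.
      committed_bytes += prev_pages << kPageShift;

      uint64_t merged_pages = span_pages + prev_pages;
      committed_bytes -= merged_pages << kPageShift;  // DecommitSpan(merged)

      // Net effect: only the 2 genuinely committed pages left the counter;
      // the 3 committed pages elsewhere in the heap are still accounted for.
      assert(committed_bytes == (3u << kPageShift));
    }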
@@ -499,6 +560,7 @@ bool PageHeap::GrowHeap(Length n) {
   uint64_t old_system_bytes = stats_.system_bytes;
   stats_.system_bytes += (ask << kPageShift);
+  stats_.committed_bytes += (ask << kPageShift);
   const PageID p = reinterpret_cast<uintptr_t>(ptr) >> kPageShift;
   ASSERT(p > 0);
@@ -520,6 +582,7 @@ bool PageHeap::GrowHeap(Length n) {
     Span* span = NewSpan(p, ask);
     RecordSpan(span);
     Delete(span);
+    ASSERT(stats_.unmapped_bytes + stats_.committed_bytes == stats_.system_bytes);
     ASSERT(Check());
     return true;
   } else {
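
Taken together with the MergeIntoFreeList() change, this is where the oscillation flagged in the TODO(jar) comment shows up: GrowHeap() counts fresh memory as committed, the Delete(span) call just above immediately decommits it, and the allocation that triggered the growth then recommits its portion in Carve(). A sketch of that flow under assumed sizes, checking the invariant at each step (a simplified model; in the real code the unmapped_bytes updates happen in the freelist operations):

    #include <cassert>
    #include <cstdint>

    static const unsigned kPageShift = 13;  // assumed page size

    int main() {
      uint64_t system = 0, committed = 0, unmapped = 0;
      const uint64_t ask = 128;  // pages obtained from the OS

      // GrowHeap(): fresh memory arrives committed.
      system    += ask << kPageShift;
      committed += ask << kPageShift;
      assert(unmapped + committed == system);

      // Delete() -> MergeIntoFreeList(): "always decommit" releases it all.
      committed -= ask << kPageShift;
      unmapped  += ask << kPageShift;
      assert(unmapped + committed == system);

      // Carve() then recommits just the pages the caller asked for.
      const uint64_t n = 4;
      committed += n << kPageShift;
      unmapped  -= n << kPageShift;
      assert(unmapped + committed == system);
    }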