Index: third_party/tcmalloc/page_heap.cc
===================================================================
--- third_party/tcmalloc/page_heap.cc	(revision 25133)
+++ third_party/tcmalloc/page_heap.cc	(working copy)
@@ -51,7 +51,12 @@
       pagemap_cache_(0),
       free_pages_(0),
       system_bytes_(0),
+#if DEFER_DECOMMIT
+      free_committed_pages_(0),
+      pages_committed_since_last_scavenge_(0),
+#else
       scavenge_counter_(0),
+#endif
       // Start scavenging at kMaxPages list
       scavenge_index_(kMaxPages-1) {
   COMPILE_ASSERT(kNumClasses <= (1 << PageMapCache::kValuebits), valuebits);
@@ -150,6 +155,16 @@
   return leftover;
 }
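+// Commit the address range covered by |span| and, under DEFER_DECOMMIT,
+// count its pages towards pages_committed_since_last_scavenge_ so the
+// scavenger can tell that allocation demand outpaced the committed
+// free lists.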
+void PageHeap::CommitSpan(Span* span) {
+  TCMalloc_SystemCommit(
+      reinterpret_cast<void*>(span->start << kPageShift),
+      static_cast<size_t>(span->length << kPageShift));
+#if DEFER_DECOMMIT
+  pages_committed_since_last_scavenge_ += span->length;
+#endif
+}
+
 Span* PageHeap::Carve(Span* span, Length n) {
   ASSERT(n > 0);
   ASSERT(span->location != Span::IN_USE);
@@ -175,14 +190,21 @@
     span->length = n;
     pagemap_.set(span->start + n - 1, span);
   }
-  ASSERT(Check());
-  free_pages_ -= n;
   if (old_location == Span::ON_RETURNED_FREELIST) {
     // We need to recommit this address space.
-    TCMalloc_SystemCommit(
-        reinterpret_cast<void*>(span->start << kPageShift),
-        static_cast<size_t>(span->length << kPageShift));
+    CommitSpan(span);
+  } else {
+#if DEFER_DECOMMIT
+    // The newly allocated memory is from a span that's already committed.
+    // Update the free_committed_pages_ count.
+    ASSERT(free_committed_pages_ >= n);
+    free_committed_pages_ -= n;
+#endif
   }
+  ASSERT(span->location == Span::IN_USE);
+  ASSERT(span->length == n);
+  ASSERT(Check());
+  free_pages_ -= n;
   return span;
 }
@@ -201,10 +223,9 @@
   // care about the pagemap entries for the boundaries.
   //
   // Note that the spans we merge into "span" may come out of
-  // a "normal" list. For simplicity, we move these into the
-  // "returned" list of the appropriate size class. We do this
-  // so that we can maximize large, continuous blocks of freed
-  // space.
+  // a "returned" list. We move those onto the "normal" list:
+  // for 'immediate' operations we favour committing over
+  // decommitting, since decommitting is performed offline.
   const PageID p = span->start;
   const Length n = span->length;
   Span* prev = GetDescriptor(p-1);
@@ -212,6 +233,12 @@
     // Merge preceding span into this span
     ASSERT(prev->start + prev->length == p);
     const Length len = prev->length;
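+    // A neighbor on the "returned" list is decommitted; recommit it
+    // before merging so the combined span can live on the "normal"
+    // (committed) free list.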
+    if (prev->location == Span::ON_RETURNED_FREELIST) {
+      CommitSpan(prev);
+#if DEFER_DECOMMIT
+      free_committed_pages_ += len;
+#endif
+    }
     DLL_Remove(prev);
     DeleteSpan(prev);
     span->start -= len;
@@ -224,6 +251,12 @@
     // Merge next span into this span
     ASSERT(next->start == p+n);
     const Length len = next->length;
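+    // As above, recommit the following neighbor before merging it in.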
+    if (next->location == Span::ON_RETURNED_FREELIST) {
+      CommitSpan(next);
+#if DEFER_DECOMMIT
+      free_committed_pages_ += len;
+#endif
+    }
     DLL_Remove(next);
     DeleteSpan(next);
     span->length += len;
@@ -232,19 +265,93 @@
   }
   Event(span, 'D', span->length);
-  span->location = Span::ON_RETURNED_FREELIST;
-  TCMalloc_SystemRelease(reinterpret_cast<void*>(span->start << kPageShift),
-                         static_cast<size_t>(span->length << kPageShift));
+  span->location = Span::ON_NORMAL_FREELIST;
   if (span->length < kMaxPages)
-    DLL_Prepend(&free_[span->length].returned, span);
+    DLL_Prepend(&free_[span->length].normal, span);
   else
-    DLL_Prepend(&large_.returned, span);
+    DLL_Prepend(&large_.normal, span);
   free_pages_ += n;
+#if DEFER_DECOMMIT
+  free_committed_pages_ += n;
+  // TODO(antonm): notify the scavenger that it could start.
+#else
   IncrementalScavenge(n);
+#endif
   ASSERT(Check());
 }
+
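+// Return memory to the system. Keeps a reserve of at least
+// kMinimumFreeCommittedPageCount committed free pages and decommits at
+// most free_committed_pages_ / kMaxScavengeAmountFactor pages per pass,
+// walking the large list first and then the fixed-size lists from
+// largest to smallest.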
+void PageHeap::Scavenge() {
+#if DEFER_DECOMMIT
+  // If we had to commit memory since the last scavenge, we don't have
+  // enough free committed pages of the necessary sizes for the
+  // allocations we are doing, so hold off on releasing memory back to
+  // the system.
+  if (pages_committed_since_last_scavenge_ > 0) {
+    pages_committed_since_last_scavenge_ = 0;
+    return;
+  }
+
+  if (free_committed_pages_ <= kMinimumFreeCommittedPageCount) {
+    return;
+  }
+
+  uint64_t to_decommit = std::min(
+      free_committed_pages_ - kMinimumFreeCommittedPageCount,
+      free_committed_pages_ / kMaxScavengeAmountFactor);
+  to_decommit = DecommitFromSpanList(&large_, to_decommit);
+  for (int i = kMaxPages - 1; i >= 0; i--) {
+    to_decommit = DecommitFromSpanList(&free_[i], to_decommit);
+  }
+
+  // Force at least one decommit from the large list; otherwise a large
+  // span at the tail could exceed the remaining budget on every pass
+  // and keep both itself and the spans behind it from being released.
+  if (to_decommit > 0) {
+    if (!DLL_IsEmpty(&large_.normal)) {
+      DecommitLastSpan(&large_, large_.normal.prev);
+    }
+  }
+#endif
+}
+
+#if DEFER_DECOMMIT
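+// Decommit |span|, which must be the last span on |span_list|'s
+// "normal" list, move it to the "returned" list, and return its
+// length in pages.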
+Length PageHeap::DecommitLastSpan(SpanList* span_list, Span* span) {
+  ASSERT(!DLL_IsEmpty(&span_list->normal));
+  ASSERT(span_list->normal.prev == span);
+
+  Length length = span->length;
+
+  DLL_Remove(span);
+
+  TCMalloc_SystemRelease(
+      reinterpret_cast<void*>(span->start << kPageShift),
+      static_cast<size_t>(span->length << kPageShift));
+  span->location = Span::ON_RETURNED_FREELIST;
+  ASSERT(free_committed_pages_ >= length);
+  free_committed_pages_ -= length;
+
+  DLL_Prepend(&span_list->returned, span);
+
+  return length;
+}
+
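+// Pop spans off the tail of |span_list|'s "normal" list and decommit
+// them until the list is empty or the next span exceeds the remaining
+// budget; returns how much of |to_decommit| is left over.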
+uint64_t PageHeap::DecommitFromSpanList(SpanList* span_list,
+                                        uint64_t to_decommit) {
+  while (!DLL_IsEmpty(&span_list->normal)) {
+    // Release the last span on the normal portion of this list.
+    Span* span = span_list->normal.prev;
+
+    if (span->length > to_decommit) {
+      return to_decommit;
+    }
+
+    to_decommit -= DecommitLastSpan(span_list, span);
+  }
+
+  return to_decommit;
+}
+
+#else
+
 void PageHeap::IncrementalScavenge(Length n) {
   // Fast path; not yet time to release memory
   scavenge_counter_ -= n;
@@ -304,6 +411,7 @@
   // Nothing to scavenge, delay for a while
   scavenge_counter_ = kDefaultReleaseDelay;
 }
+#endif
 void PageHeap::RegisterSizeClass(Span* span, size_t sc) {
   // Associate span object with all interior pages as well
@@ -405,6 +513,9 @@
     if (ptr == NULL) return false;
   }
   ask = actual_size >> kPageShift;
+#if DEFER_DECOMMIT
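+  // Memory grabbed from the system arrives committed, so count it
+  // towards the pages committed since the last scavenge.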
+  pages_committed_since_last_scavenge_ += ask;
+#endif
   RecordGrowth(ask << kPageShift);
   uint64_t old_system_bytes = system_bytes_;
@@ -490,6 +601,9 @@
   }
   ReleaseFreeList(&large_.normal, &large_.returned);
   ASSERT(Check());
+#if DEFER_DECOMMIT
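+  // Every span on the normal free lists has just been decommitted, so
+  // no committed free pages remain.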
+  free_committed_pages_ = 0;
+#endif
 }
 }  // namespace tcmalloc