Index: third_party/tcmalloc/chromium/src/page_heap.cc
diff --git a/third_party/tcmalloc/chromium/src/page_heap.cc b/third_party/tcmalloc/chromium/src/page_heap.cc
index 83ff8922c9a186ab37cc6875e31a454c9bdfc7ea..be46dd4cbe0b986a5b6ad3bff9ef706504f0fc73 100644
--- a/third_party/tcmalloc/chromium/src/page_heap.cc
+++ b/third_party/tcmalloc/chromium/src/page_heap.cc
@@ -100,7 +100,6 @@ Span* PageHeap::New(Length n) {
   // Grow the heap and try again.
   if (!GrowHeap(n)) {
-    ASSERT(stats_.unmapped_bytes+ stats_.committed_bytes==stats_.system_bytes);
     ASSERT(Check());
     return NULL;
   }
@@ -161,18 +160,6 @@ Span* PageHeap::Split(Span* span, Length n) {
   return leftover;
 }
-void PageHeap::CommitSpan(Span* span) {
-  TCMalloc_SystemCommit(reinterpret_cast<void*>(span->start << kPageShift),
-                        static_cast<size_t>(span->length << kPageShift));
-  stats_.committed_bytes += span->length << kPageShift;
-}
-
-void PageHeap::DecommitSpan(Span* span) {
-  TCMalloc_SystemRelease(reinterpret_cast<void*>(span->start << kPageShift),
-                         static_cast<size_t>(span->length << kPageShift));
-  stats_.committed_bytes -= span->length << kPageShift;
-}
-
 Span* PageHeap::Carve(Span* span, Length n) {
   ASSERT(n > 0);
   ASSERT(span->location != Span::IN_USE);
@@ -188,29 +175,11 @@ Span* PageHeap::Carve(Span* span, Length n) {
     leftover->location = old_location;
     Event(leftover, 'S', extra);
     RecordSpan(leftover);
-
-    // The previous span of |leftover| was just splitted -- no need to
-    // coalesce them. The next span of |leftover| was not previously coalesced
-    // with |span|, i.e. is NULL or has got location other than |old_location|.
-    const PageID p = leftover->start;
-    const Length len = leftover->length;
-    Span* next = GetDescriptor(p+len);
-    ASSERT (next == NULL ||
-            next->location == Span::IN_USE ||
-            next->location != leftover->location);
-
     PrependToFreeList(leftover); // Skip coalescing - no candidates possible
     span->length = n;
     pagemap_.set(span->start + n - 1, span);
   }
   ASSERT(Check());
-  if (old_location == Span::ON_RETURNED_FREELIST) {
-    // We need to recommit this address space.
-    CommitSpan(span);
-  }
-  ASSERT(span->location == Span::IN_USE);
-  ASSERT(span->length == n);
-  ASSERT(stats_.unmapped_bytes+ stats_.committed_bytes==stats_.system_bytes);
   return span;
 }
@@ -227,7 +196,6 @@ void PageHeap::Delete(Span* span) {
   Event(span, 'D', span->length);
   MergeIntoFreeList(span); // Coalesces if possible
   IncrementalScavenge(n);
-  ASSERT(stats_.unmapped_bytes+ stats_.committed_bytes==stats_.system_bytes);
   ASSERT(Check());
 }
@@ -239,37 +207,15 @@ void PageHeap::MergeIntoFreeList(Span* span) {
   // entries for the pieces we are merging together because we only
   // care about the pagemap entries for the boundaries.
   //
-  // Note that the adjacent spans we merge into "span" may come out of a
-  // "normal" (committed) list, and cleanly merge with our IN_USE span, which
-  // is implicitly committed. If the adjacents spans are on the "returned"
-  // (decommitted) list, then we must get both spans into the same state before
-  // or after we coalesce them. The current code always decomits. This is
-  // achieved by blindly decommitting the entire coalesced region, which may
-  // include any combination of committed and decommitted spans, at the end of
-  // the method.
-
-  // TODO(jar): "Always decommit" causes some extra calls to commit when we are
-  // called in GrowHeap() during an allocation :-/. We need to eval the cost of
-  // that oscillation, and possibly do something to reduce it.
-
-  // TODO(jar): We need a better strategy for deciding to commit, or decommit,
-  // based on memory usage and free heap sizes.
-
+  // Note that only similar spans are merged together. For example,
+  // we do not coalesce "returned" spans with "normal" spans.
   const PageID p = span->start;
   const Length n = span->length;
   Span* prev = GetDescriptor(p-1);
-  if (prev != NULL && prev->location != Span::IN_USE) {
+  if (prev != NULL && prev->location == span->location) {
     // Merge preceding span into this span
     ASSERT(prev->start + prev->length == p);
     const Length len = prev->length;
-    if (prev->location == Span::ON_RETURNED_FREELIST) {
-      // We're about to put the merge span into the returned freelist and call
-      // DecommitSpan() on it, which will mark the entire span including this
-      // one as released and decrease stats_.committed_bytes by the size of the
-      // merged span. To make the math work out we temporarily increase the
-      // stats_.committed_bytes amount.
-      stats_.committed_bytes += prev->length << kPageShift;
-    }
     RemoveFromFreeList(prev);
     DeleteSpan(prev);
     span->start -= len;
@@ -278,14 +224,10 @@ void PageHeap::MergeIntoFreeList(Span* span) {
     Event(span, 'L', len);
   }
   Span* next = GetDescriptor(p+n);
-  if (next != NULL && next->location != Span::IN_USE) {
+  if (next != NULL && next->location == span->location) {
     // Merge next span into this span
     ASSERT(next->start == p+n);
     const Length len = next->length;
-    if (next->location == Span::ON_RETURNED_FREELIST) {
-      // See the comment below 'if (prev->location ...' for explanation.
-      stats_.committed_bytes += next->length << kPageShift;
-    }
     RemoveFromFreeList(next);
     DeleteSpan(next);
     span->length += len;
@@ -293,9 +235,6 @@ void PageHeap::MergeIntoFreeList(Span* span) {
     Event(span, 'R', len);
   }
-  Event(span, 'D', span->length);
-  span->location = Span::ON_RETURNED_FREELIST;
-  DecommitSpan(span);
   PrependToFreeList(span);
 }
@@ -403,103 +342,25 @@ void PageHeap::RegisterSizeClass(Span* span, size_t sc) {
   }
 }
-static double MiB(uint64_t bytes) {
-  return bytes / 1048576.0;
-}
-
-static double PagesToMiB(uint64_t pages) {
-  return (pages << kPageShift) / 1048576.0;
-}
-
-void PageHeap::GetClassSizes(int64 class_sizes_normal[kMaxPages],
-                             int64 class_sizes_returned[kMaxPages],
-                             int64* normal_pages_in_spans,
-                             int64* returned_pages_in_spans) {
-
+void PageHeap::GetSmallSpanStats(SmallSpanStats* result) {
   for (int s = 0; s < kMaxPages; s++) {
-    if (class_sizes_normal != NULL) {
-      class_sizes_normal[s] = DLL_Length(&free_[s].normal);
-    }
-    if (class_sizes_returned != NULL) {
-      class_sizes_returned[s] = DLL_Length(&free_[s].returned);
-    }
-  }
-
-  if (normal_pages_in_spans != NULL) {
-    *normal_pages_in_spans = 0;
-    for (Span* s = large_.normal.next; s != &large_.normal; s = s->next) {
-      *normal_pages_in_spans += s->length;;
-    }
-  }
-
-  if (returned_pages_in_spans != NULL) {
-    *returned_pages_in_spans = 0;
-    for (Span* s = large_.returned.next; s != &large_.returned; s = s->next) {
-      *returned_pages_in_spans += s->length;
-    }
+    result->normal_length[s] = DLL_Length(&free_[s].normal);
+    result->returned_length[s] = DLL_Length(&free_[s].returned);
   }
 }
-void PageHeap::Dump(TCMalloc_Printer* out) {
-  int nonempty_sizes = 0;
-  for (int s = 0; s < kMaxPages; s++) {
-    if (!DLL_IsEmpty(&free_[s].normal) || !DLL_IsEmpty(&free_[s].returned)) {
-      nonempty_sizes++;
-    }
-  }
-  out->printf("------------------------------------------------\n");
-  out->printf("PageHeap: %d sizes; %6.1f MiB free; %6.1f MiB unmapped\n",
-              nonempty_sizes, MiB(stats_.free_bytes),
-              MiB(stats_.unmapped_bytes));
-  out->printf("------------------------------------------------\n");
-  uint64_t total_normal = 0;
-  uint64_t total_returned = 0;
-  for (int s = 0; s < kMaxPages; s++) {
-    const int n_length = DLL_Length(&free_[s].normal);
-    const int r_length = DLL_Length(&free_[s].returned);
-    if (n_length + r_length > 0) {
-      uint64_t n_pages = s * n_length;
-      uint64_t r_pages = s * r_length;
-      total_normal += n_pages;
-      total_returned += r_pages;
-      out->printf("%6u pages * %6u spans ~ %6.1f MiB; %6.1f MiB cum"
-                  "; unmapped: %6.1f MiB; %6.1f MiB cum\n",
-                  s,
-                  (n_length + r_length),
-                  PagesToMiB(n_pages + r_pages),
-                  PagesToMiB(total_normal + total_returned),
-                  PagesToMiB(r_pages),
-                  PagesToMiB(total_returned));
-    }
-  }
-
-  uint64_t n_pages = 0;
-  uint64_t r_pages = 0;
-  int n_spans = 0;
-  int r_spans = 0;
-  out->printf("Normal large spans:\n");
+void PageHeap::GetLargeSpanStats(LargeSpanStats* result) {
+  result->spans = 0;
+  result->normal_pages = 0;
+  result->returned_pages = 0;
   for (Span* s = large_.normal.next; s != &large_.normal; s = s->next) {
-    out->printf(" [ %6" PRIuPTR " pages ] %6.1f MiB\n",
-                s->length, PagesToMiB(s->length));
-    n_pages += s->length;
-    n_spans++;
+    result->normal_pages += s->length;;
+    result->spans++;
   }
-  out->printf("Unmapped large spans:\n");
   for (Span* s = large_.returned.next; s != &large_.returned; s = s->next) {
-    out->printf(" [ %6" PRIuPTR " pages ] %6.1f MiB\n",
-                s->length, PagesToMiB(s->length));
-    r_pages += s->length;
-    r_spans++;
+    result->returned_pages += s->length;
+    result->spans++;
   }
-  total_normal += n_pages;
-  total_returned += r_pages;
-  out->printf(">255 large * %6u spans ~ %6.1f MiB; %6.1f MiB cum"
-              "; unmapped: %6.1f MiB; %6.1f MiB cum\n",
-              (n_spans + r_spans),
-              PagesToMiB(n_pages + r_pages),
-              PagesToMiB(total_normal + total_returned),
-              PagesToMiB(r_pages),
-              PagesToMiB(total_returned));
 }
 bool PageHeap::GetNextRange(PageID start, base::MallocRange* r) {
@@ -560,7 +421,6 @@ bool PageHeap::GrowHeap(Length n) {
   uint64_t old_system_bytes = stats_.system_bytes;
   stats_.system_bytes += (ask << kPageShift);
-  stats_.committed_bytes += (ask << kPageShift);
   const PageID p = reinterpret_cast<uintptr_t>(ptr) >> kPageShift;
   ASSERT(p > 0);
@@ -582,7 +442,6 @@ bool PageHeap::GrowHeap(Length n) {
     Span* span = NewSpan(p, ask);
     RecordSpan(span);
     Delete(span);
-    ASSERT(stats_.unmapped_bytes+ stats_.committed_bytes==stats_.system_bytes);
     ASSERT(Check());
     return true;
   } else {