Index: src/heap/mark-compact.cc
diff --git a/src/heap/mark-compact.cc b/src/heap/mark-compact.cc
index 57c93366b845a812a6f2193a1ea4c22a6d911b6e..a94d4782ee53cfebb0692fe083e6e12fd9604a7f 100644
--- a/src/heap/mark-compact.cc
+++ b/src/heap/mark-compact.cc
@@ -595,11 +595,11 @@ const char* AllocationSpaceName(AllocationSpace space) {
 static int FreeListFragmentation(PagedSpace* space, Page* p) {
   // If page was not swept then there are no free list items on it.
   if (!p->WasSwept()) {
-    if (FLAG_trace_fragmentation) {
+    if (FLAG_trace_fragmentation_verbose) {
       PrintF("%p [%s]: %d bytes live (unswept)\n", reinterpret_cast<void*>(p),
              AllocationSpaceName(space->identity()), p->LiveBytes());
     }
-    return 0;
+    return FLAG_always_compact ? 1 : 0;
   }

   PagedSpace::SizeStats sizes;
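
Note: this hunk makes two separate changes. The per-page report moves behind the new FLAG_trace_fragmentation_verbose flag, and under --always-compact an unswept page now scores 1 instead of 0, so it remains eligible as an evacuation candidate. A minimal standalone sketch of the return-value change (illustrative only, not part of the patch; the helper name is made up):

#include <cstdio>

// Mirrors the changed early return in FreeListFragmentation(): a nonzero
// score keeps an unswept page in the running as an evacuation candidate.
static int UnsweptPageFragmentation(bool always_compact) {
  return always_compact ? 1 : 0;
}

int main() {
  printf("default:          %d\n", UnsweptPageFragmentation(false));  // 0
  printf("--always-compact: %d\n", UnsweptPageFragmentation(true));   // 1
  return 0;
}
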
@@ -616,7 +616,7 @@ static int FreeListFragmentation(PagedSpace* space, Page* p) {
     ratio_threshold = 15;
   }

-  if (FLAG_trace_fragmentation) {
+  if (FLAG_trace_fragmentation_verbose) {
     PrintF("%p [%s]: %d (%.2f%%) %d (%.2f%%) %d (%.2f%%) %d (%.2f%%) %s\n",
            reinterpret_cast<void*>(p), AllocationSpaceName(space->identity()),
            static_cast<int>(sizes.small_size_),
@@ -645,7 +645,7 @@ void MarkCompactCollector::CollectEvacuationCandidates(PagedSpace* space) {
          space->identity() == OLD_DATA_SPACE ||
          space->identity() == CODE_SPACE);

-  static const int kMaxMaxEvacuationCandidates = 1000;
+  static const int kMaxMaxEvacuationCandidates = 10000;
[ulan, 2015/03/20 10:56:53] Does this help? I think 1000 pages (1GB) should be
[Erik Corry, 2015/03/20 11:39:23] That rather depends on the total heap size. Not e
   int number_of_pages = space->CountTotalPages();
   int max_evacuation_candidates =
       static_cast<int>(std::sqrt(number_of_pages / 2.0) + 1);
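
Note: to put the exchange above in numbers, the candidate count normally grows as sqrt(number_of_pages / 2) + 1 and is then clamped to kMaxMaxEvacuationCandidates; the next hunk additionally jumps straight to the clamp under --always-compact. With 1 MB pages (the "1000 pages (1GB)" arithmetic above), the old clamp of 1000 only binds at around two million pages, so the raise to 10000 mostly matters for the --always-compact path. A small standalone sketch (illustrative only; the helper and the sample page counts are not from the patch):

#include <algorithm>
#include <cmath>
#include <cstdio>
#include <initializer_list>

// Mirrors the formula in CollectEvacuationCandidates(): sqrt of half the page
// count, clamped to the compile-time maximum.
static int MaxEvacuationCandidates(int number_of_pages, int cap) {
  int candidates = static_cast<int>(std::sqrt(number_of_pages / 2.0) + 1);
  return std::min(candidates, cap);
}

int main() {
  for (int pages : {1000, 100000, 2000000}) {  // 1 page == 1 MB
    printf("%8d pages (~%d MB heap): old cap 1000 -> %d, new cap 10000 -> %d\n",
           pages, pages, MaxEvacuationCandidates(pages, 1000),
           MaxEvacuationCandidates(pages, 10000));
  }
  return 0;
}
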
@@ -692,6 +692,10 @@ void MarkCompactCollector::CollectEvacuationCandidates(PagedSpace* space) {
     max_evacuation_candidates *= 2;
   }

+  if (FLAG_always_compact) {
+    max_evacuation_candidates = kMaxMaxEvacuationCandidates;
+  }
+
   if (FLAG_trace_fragmentation && mode == REDUCE_MEMORY_FOOTPRINT) {
     PrintF(
         "Estimated over reserved memory: %.1f / %.1f MB (threshold %d), "
@@ -723,7 +727,7 @@ void MarkCompactCollector::CollectEvacuationCandidates(PagedSpace* space) {
       unsigned int counter = space->heap()->ms_count();
       uintptr_t page_number = reinterpret_cast<uintptr_t>(p) >> kPageSizeBits;
       if ((counter & 1) == (page_number & 1)) fragmentation = 1;
-    } else if (mode == REDUCE_MEMORY_FOOTPRINT) {
+    } else if (mode == REDUCE_MEMORY_FOOTPRINT && !FLAG_always_compact) {
       // Don't try to release too many pages.
       if (estimated_release >= over_reserved) {
         continue;
@@ -748,7 +752,7 @@ void MarkCompactCollector::CollectEvacuationCandidates(PagedSpace* space) {
         fragmentation = 0;
       }

-      if (FLAG_trace_fragmentation) {
+      if (FLAG_trace_fragmentation_verbose) {
         PrintF("%p [%s]: %d (%.2f%%) free %s\n", reinterpret_cast<void*>(p),
                AllocationSpaceName(space->identity()),
                static_cast<int>(free_bytes),
@@ -3039,6 +3043,7 @@ void MarkCompactCollector::EvacuateLiveObjectsFromPage(Page* p) {

 void MarkCompactCollector::EvacuatePages() {
   int npages = evacuation_candidates_.length();
+  int abandoned_pages = 0;
   for (int i = 0; i < npages; i++) {
     Page* p = evacuation_candidates_[i];
     DCHECK(p->IsEvacuationCandidate() ||
@@ -3054,7 +3059,7 @@ void MarkCompactCollector::EvacuatePages() {
     if (p->IsEvacuationCandidate()) {
       // During compaction we might have to request a new page. Check that we
       // have an emergency page and the space still has room for that.
-      if (space->HasEmergencyMemory() && space->CanExpand()) {
+      if (space->HasEmergencyMemory() || space->CanExpand()) {
[ulan, 2015/03/20 10:56:54] Good catch!
         EvacuateLiveObjectsFromPage(p);
       } else {
         // Without room for expansion evacuation is not guaranteed to succeed.
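
Note: the one-character fix above ("Good catch!") changes when the remaining candidates are abandoned. The old conjunction gave up whenever the space could no longer expand, even though an emergency page had already been reserved; with the disjunction, either an already-reserved emergency page or room to expand is enough to keep evacuating. A minimal truth-table sketch (illustrative only, not part of the patch):

#include <cstdio>

// Compares the old and new guards from EvacuatePages(). "evacuate" means
// EvacuateLiveObjectsFromPage() runs; "abandon" means the remaining candidates
// are flipped back to RESCAN_ON_EVACUATION.
int main() {
  for (int has_emergency = 0; has_emergency <= 1; ++has_emergency) {
    for (int can_expand = 0; can_expand <= 1; ++can_expand) {
      bool old_guard = has_emergency && can_expand;  // before this patch
      bool new_guard = has_emergency || can_expand;  // after this patch
      printf("HasEmergencyMemory=%d CanExpand=%d : old=%-8s new=%s\n",
             has_emergency, can_expand, old_guard ? "evacuate" : "abandon",
             new_guard ? "evacuate" : "abandon");
    }
  }
  return 0;
}
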
@@ -3065,6 +3070,7 @@ void MarkCompactCollector::EvacuatePages() {
           page->ClearEvacuationCandidate();
           page->SetFlag(Page::RESCAN_ON_EVACUATION);
         }
+        abandoned_pages = npages - i;
         break;
       }
     }
@@ -3078,6 +3084,16 @@ void MarkCompactCollector::EvacuatePages() {
         space->FreeEmergencyMemory();
       }
     }
+    if (FLAG_trace_fragmentation) {
+      if (abandoned_pages != 0) {
+        PrintF(
+            " Abandon %d out of %d page defragmentations due to lack of "
+            "memory\n",
+            abandoned_pages, npages);
+      } else {
+        PrintF(" Defragmented %d pages\n", npages);
+      }
+    }
   }
 }

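
Note: the bookkeeping added above reads as follows: when the loop bails out at candidate i, the first i pages have already been evacuated and the rest are abandoned, hence abandoned_pages = npages - i. A standalone replay with made-up numbers (illustrative only, not part of the patch):

#include <cstdio>

// Replays the abandoned_pages arithmetic from EvacuatePages() with a
// hypothetical failure at candidate index 5 out of 8 candidates.
int main() {
  const int npages = 8;
  int abandoned_pages = 0;
  for (int i = 0; i < npages; i++) {
    bool can_get_a_page = (i != 5);  // pretend memory runs out at i == 5
    if (!can_get_a_page) {
      abandoned_pages = npages - i;  // candidates i..npages-1 are abandoned
      break;
    }
  }
  if (abandoned_pages != 0) {
    printf(" Abandon %d out of %d page defragmentations due to lack of memory\n",
           abandoned_pages, npages);
  } else {
    printf(" Defragmented %d pages\n", npages);
  }
  return 0;
}
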
@@ -3431,7 +3447,7 @@ void MarkCompactCollector::EvacuateNewSpaceAndCandidates() {
                              GCTracer::Scope::MC_UPDATE_POINTERS_TO_EVACUATED);
     SlotsBuffer::UpdateSlotsRecordedIn(heap_, migration_slots_buffer_,
                                        code_slots_filtering_required);
-    if (FLAG_trace_fragmentation) {
+    if (FLAG_trace_fragmentation_verbose) {
       PrintF(" migration slots buffer: %d\n",
              SlotsBuffer::SizeOfChain(migration_slots_buffer_));
     }
@@ -3466,7 +3482,7 @@ void MarkCompactCollector::EvacuateNewSpaceAndCandidates() {
     if (p->IsEvacuationCandidate()) {
       SlotsBuffer::UpdateSlotsRecordedIn(heap_, p->slots_buffer(),
                                          code_slots_filtering_required);
-      if (FLAG_trace_fragmentation) {
+      if (FLAG_trace_fragmentation_verbose) {
         PrintF(" page %p slots buffer: %d\n", reinterpret_cast<void*>(p),
                SlotsBuffer::SizeOfChain(p->slots_buffer()));