Index: src/heap.cc
diff --git a/src/heap.cc b/src/heap.cc
index ff7a9ff12f20d6f7e37751f7d2bd91fb26939684..8254f61efbc336baa92f5d4f1c7ac863ba9a6a01 100644
--- a/src/heap.cc
+++ b/src/heap.cc
@@ -101,6 +101,7 @@ Heap::Heap()
       promotion_rate_(0),
       semi_space_copied_object_size_(0),
       semi_space_copied_rate_(0),
+      changed_to_max_semi_space_size_(false),
       max_gc_pause_(0.0),
       total_gc_time_ms_(0.0),
       max_alive_after_gc_(0),
@@ -438,6 +439,8 @@ void Heap::GarbageCollectionPrologue() {
   if (isolate()->concurrent_osr_enabled()) {
     isolate()->optimizing_compiler_thread()->AgeBufferedOsrJobs();
   }
+
+  CheckNewSpaceExpansionCriteria();
 }
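Moving the call here (and out of Heap::Scavenge(), see the last hunk) means the expansion check now runs once per GC cycle, before any later phase of that cycle reads the new flag. A minimal standalone sketch of that ordering, with invented names rather than V8's real API:

```cpp
#include <cstdio>

// Hypothetical, simplified model of the ordering this hunk establishes;
// the names mirror heap.cc but none of this is V8's actual code.
struct HeapSketch {
  bool changed_to_max_semi_space_size = false;

  void CheckNewSpaceExpansionCriteria() {
    // Reset once per GC cycle; set again only if this cycle grows new
    // space to its maximum capacity (see the hunk at line 1292 below).
    changed_to_max_semi_space_size = false;
  }

  void GarbageCollectionPrologue() {
    // Runs first in every GC cycle, so the flag is fresh for later
    // phases such as the pretenuring-feedback pass.
    CheckNewSpaceExpansionCriteria();
  }
};

int main() {
  HeapSketch heap;
  heap.GarbageCollectionPrologue();
  std::printf("flag after prologue: %d\n",
              heap.changed_to_max_semi_space_size);
}
```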
@@ -485,8 +488,13 @@ void Heap::ProcessPretenuringFeedback() {
   // If the scratchpad overflowed, we have to iterate over the allocation
   // sites list.
+  // TODO(hpayer): We iterate over the whole list of allocation sites when
+  // we have grown to the maximum semi-space size, to deopt maybe-tenured
+  // allocation sites. We could hold the maybe-tenured allocation sites in
+  // a separate data structure if this turns out to be a performance problem.
   bool use_scratchpad =
-      allocation_sites_scratchpad_length_ < kAllocationSiteScratchpadSize;
+      allocation_sites_scratchpad_length_ < kAllocationSiteScratchpadSize &&
+      !changed_to_max_semi_space_size_;
   int i = 0;
   Object* list_element = allocation_sites_list();
@@ -500,14 +508,15 @@
     allocation_mementos_found += site->memento_found_count();
     if (site->memento_found_count() > 0) {
       active_allocation_sites++;
+      if (site->DigestPretenuringFeedback()) trigger_deoptimization = true;
+      if (site->GetPretenureMode() == TENURED) {
+        tenure_decisions++;
+      } else {
+        dont_tenure_decisions++;
+      }
+      allocation_sites++;
     }
-    if (site->DigestPretenuringFeedback()) trigger_deoptimization = true;
-    if (site->GetPretenureMode() == TENURED) {
-      tenure_decisions++;
-    } else {
-      dont_tenure_decisions++;
-    }
-    allocation_sites++;
+
    if (use_scratchpad) {
      i++;
    } else {
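Two things happen in this hunk: the feedback-digesting code moves inside the memento_found_count() > 0 check, so only sites with fresh feedback are digested, and the loop now reads that feedback either from the fixed-size scratchpad or, when the scratchpad overflowed or the semi-space just grew to its maximum size, from the full weak list of allocation sites. A sketch of that traversal choice, with simplified, hypothetical types in place of V8's internals:

```cpp
// Stand-ins for V8 internals; the size and types are invented.
constexpr int kAllocationSiteScratchpadSizeSketch = 256;

struct AllocationSiteSketch {
  int memento_found_count = 0;
  AllocationSiteSketch* next = nullptr;  // heap-wide weak list of sites
};

// Mirrors the condition in the first hunk above: prefer the cheap
// scratchpad walk unless it overflowed or we just grew to the maximum
// semi-space size.
bool UseScratchpad(int scratchpad_length,
                   bool changed_to_max_semi_space_size) {
  return scratchpad_length < kAllocationSiteScratchpadSizeSketch &&
         !changed_to_max_semi_space_size;
}

// The fallback walk visits every site on the list, including sites with
// no fresh mementos, which is what makes it possible to revisit
// "maybe tenured" decisions once the semi-space can no longer grow.
int CountSitesWithFeedback(AllocationSiteSketch* head) {
  int active = 0;
  for (AllocationSiteSketch* site = head; site != nullptr;
       site = site->next) {
    if (site->memento_found_count > 0) active++;
  }
  return active;
}
```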
@@ -1283,6 +1292,7 @@ static void VerifyNonPointerSpacePointers(Heap* heap) {

 void Heap::CheckNewSpaceExpansionCriteria() {
+  changed_to_max_semi_space_size_ = false;
   if (new_space_.Capacity() < new_space_.MaximumCapacity() &&
       survived_since_last_expansion_ > new_space_.Capacity()) {
     // Grow the size of new space if there is room to grow, enough data
@@ -1290,6 +1300,9 @@ void Heap::CheckNewSpaceExpansionCriteria() {
     // high promotion mode.
     new_space_.Grow();
     survived_since_last_expansion_ = 0;
+    if (new_space_.IsAtMaximumCapacity()) {
+      changed_to_max_semi_space_size_ = true;
      [Review comments attached to this line]
      mvstanton, 2014/06/02 13:31:25:
        Is the boolean really needed? Why not just ask new
      Hannes Payer (out of office), 2014/06/02 14:56:52:
        As discussed offline, we need a separate flag to i
+    }
   }
 }
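The flag is deliberately a one-cycle signal: it is cleared at the top of every check and set only by the growth step that lands on maximum capacity. That is also the answer to the review question above; asking new_space_.IsAtMaximumCapacity() directly would stay true on every later cycle, while this flag fires exactly once. A compilable sketch of the protocol, with an invented SemiSpaceSketch type in place of V8's real NewSpace:

```cpp
#include <algorithm>

// Invented stand-in for V8's NewSpace; only what the sketch needs.
struct SemiSpaceSketch {
  int capacity;
  int maximum_capacity;
  void Grow() { capacity = std::min(capacity * 2, maximum_capacity); }
  bool IsAtMaximumCapacity() const { return capacity == maximum_capacity; }
};

void CheckNewSpaceExpansionCriteriaSketch(
    SemiSpaceSketch& space, int& survived_since_last_expansion,
    bool& changed_to_max_semi_space_size) {
  changed_to_max_semi_space_size = false;  // one-cycle flag: reset each check
  if (space.capacity < space.maximum_capacity &&
      survived_since_last_expansion > space.capacity) {
    space.Grow();
    survived_since_last_expansion = 0;
    if (space.IsAtMaximumCapacity()) {
      // Only the growth step that reaches maximum capacity sets the flag;
      // later cycles reset it even though the space stays at maximum.
      changed_to_max_semi_space_size = true;
    }
  }
}
```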
@@ -1433,8 +1446,6 @@ void Heap::Scavenge() {
   // Used for updating survived_since_last_expansion_ at function end.
   intptr_t survived_watermark = PromotedSpaceSizeOfObjects();

-  CheckNewSpaceExpansionCriteria();
-
   SelectScavengingVisitorsTable();

   incremental_marking()->PrepareForScavenge();
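Removing the call here closes the loop: scavenges no longer run their own expansion check, because GarbageCollectionPrologue() (first hunk) already did. A self-contained, hypothetical trace of a few GC cycles, showing the full-list walk being triggered exactly once, in the cycle that grows the semi-space to its maximum size:

```cpp
#include <algorithm>
#include <cstdio>

// Toy numbers, not V8's real sizes; this only demonstrates the flag's
// interaction with the scratchpad decision across cycles.
int main() {
  int capacity = 1, max_capacity = 4, survived = 0;
  bool changed_to_max = false;
  for (int cycle = 1; cycle <= 4; ++cycle) {
    // Prologue: expansion check (per the GarbageCollectionPrologue hunk).
    changed_to_max = false;
    survived += 3;  // pretend 3 units survived since the last expansion
    if (capacity < max_capacity && survived > capacity) {
      capacity = std::min(capacity * 2, max_capacity);
      survived = 0;
      if (capacity == max_capacity) changed_to_max = true;
    }
    // Pretenuring feedback: scratchpad unless we just grew to max
    // (the scratchpad-overflow case is omitted here).
    bool use_scratchpad = !changed_to_max;
    std::printf("cycle %d: capacity=%d use_scratchpad=%d\n",
                cycle, capacity, use_scratchpad);
  }
}
```

Running the trace, only cycle 2 (the one that reaches maximum capacity) takes the full-list path; cycles 3 and 4 go back to the scratchpad even though the semi-space is still at its maximum, which is exactly the one-shot behavior the separate flag provides.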