Index: src/heap/spaces.cc
diff --git a/src/heap/spaces.cc b/src/heap/spaces.cc
index 9bba6dd89b9cfe6e3e3ba0933d6d89b87dfc78ca..06618cdc726b0bde4cf153523e835756fc57f392 100644
--- a/src/heap/spaces.cc
+++ b/src/heap/spaces.cc
@@ -1415,11 +1415,9 @@ void NewSpace::UpdateInlineAllocationLimit(int size_in_bytes) {
Address high = to_space_.page_high();
Address new_top = allocation_info_.top() + size_in_bytes;
allocation_info_.set_limit(Min(new_top, high));
- } else if (inline_allocation_limit_step() == 0) {
- // Normal limit is the end of the current page.
- allocation_info_.set_limit(to_space_.page_high());
} else {
// Lower limit during incremental marking.
+ DCHECK(inline_allocation_limit_step_ != 0);
Address high = to_space_.page_high();
Address new_top = allocation_info_.top() + size_in_bytes;
Address new_limit = new_top + inline_allocation_limit_step_;
@@ -1502,11 +1500,13 @@ bool NewSpace::EnsureAllocation(int size_in_bytes,
if (allocation_info_.limit() < high) {
// Either the limit has been lowered because linear allocation was disabled
- // or because incremental marking wants to get a chance to do a step. Set
- // the new limit accordingly.
+ // or because incremental marking wants to get a chance to do a step,
+ // or because idle scavenge job wants to get a chance to post a task.
+ // Set the new limit accordingly.
if (top_on_previous_step_) {
Address new_top = old_top + aligned_size_in_bytes;
int bytes_allocated = static_cast<int>(new_top - top_on_previous_step_);
+ heap()->ScheduleIdleScavengeIfNeeded(bytes_allocated);
heap()->incremental_marking()->Step(
bytes_allocated, IncrementalMarking::GC_VIA_STACK_GUARD);
top_on_previous_step_ = new_top;