Index: src/heap/heap.cc
diff --git a/src/heap/heap.cc b/src/heap/heap.cc
index 0de91785849e9258261e859eea2889571472ea8b..4ab218f2de25f02f0aa09494fdc55ec6065d37f8 100644
--- a/src/heap/heap.cc
+++ b/src/heap/heap.cc
@@ -94,7 +94,6 @@ Heap::Heap()
       contexts_disposed_(0),
       number_of_disposed_maps_(0),
       global_ic_age_(0),
-      scan_on_scavenge_pages_(0),
       new_space_(this),
       old_space_(NULL),
       code_space_(NULL),
@@ -114,7 +113,6 @@ Heap::Heap()
       old_gen_exhausted_(false),
       optimize_for_memory_usage_(false),
       inline_allocation_disabled_(false),
-      store_buffer_rebuilder_(store_buffer()),
       total_regexp_code_generated_(0),
       tracer_(nullptr),
       high_survival_rate_period_length_(0),
@@ -454,8 +452,6 @@ void Heap::GarbageCollectionPrologue() {
   ReportStatisticsBeforeGC();
 #endif  // DEBUG
 
-  store_buffer()->GCPrologue();
-
   if (isolate()->concurrent_osr_enabled()) {
     isolate()->optimizing_compile_dispatcher()->AgeBufferedOsrJobs();
   }
@@ -639,8 +635,6 @@ void Heap::DeoptMarkedAllocationSites() {
 void Heap::GarbageCollectionEpilogue() {
-  store_buffer()->GCEpilogue();
-
   // In release mode, we only zap the from space under heap verification.
   if (Heap::ShouldZapGarbage()) {
     ZapFromSpace();
@@ -1552,12 +1546,6 @@ static bool IsUnmodifiedHeapObject(Object** p) {
 }
 
 
-void Heap::ScavengeStoreBufferCallback(Heap* heap, MemoryChunk* page,
-                                       StoreBufferEvent event) {
-  heap->store_buffer_rebuilder_.Callback(page, event);
-}
-
-
 void PromotionQueue::Initialize() {
   // The last to-space page may be used for promotion queue. On promotion
   // conflict, we use the emergency stack.
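
For context: ScavengeStoreBufferCallback existed only to forward page-level
store-buffer events to the rebuilder while the buffer was reconstructed during
a scavenge; with slots now kept per page and updated in place, that event
plumbing is deleted wholesale. A rough sketch of the deleted shape, with
hypothetical names standing in for the old V8 types:

    // Sketch only: the enum values and types approximate the removed
    // machinery; they are not the actual V8 declarations.
    enum class StoreBufferEventSketch {
      kStoreBufferFull,     // buffer overflowed during the scavenge
      kStartScanningPages,  // begin rebuilding from page scans
      kScanningPage         // a page's pointers are being re-scanned
    };

    struct StoreBufferRebuilderSketch {
      // Invoked per event; decided whether a page's entries stayed in
      // the buffer or the page was flagged for scan-on-scavenge instead
      // (the scan_on_scavenge_pages_ counter removed above).
      void Callback(void* page, StoreBufferEventSketch event) {}
    };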
@@ -1690,8 +1678,6 @@ void Heap::Scavenge() {
   {
     // Copy objects reachable from the old generation.
     GCTracer::Scope gc_scope(tracer(),
                              GCTracer::Scope::SCAVENGER_OLD_TO_NEW_POINTERS);
-    StoreBufferRebuildScope scope(this, store_buffer(),
-                                  &ScavengeStoreBufferCallback);
     store_buffer()->IteratePointersToNewSpace(&Scavenger::ScavengeObject);
   }
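
With per-page old-to-new slot sets, the buffer no longer has to be torn down
and rebuilt around each scavenge, so the StoreBufferRebuildScope here (and in
DoScavenge below) can simply go away. A minimal sketch of the resulting
iteration model, using illustrative names rather than V8's actual API:

    #include <cstdint>
    #include <functional>
    #include <iterator>
    #include <unordered_set>

    using Address = uintptr_t;

    // Hypothetical remembered set of old-to-new slot addresses.
    struct OldToNewSlots {
      std::unordered_set<Address> slots_;

      // Visit every recorded slot once. The callback returns true if the
      // slot still points into new space after scavenging; stale slots
      // are dropped in place, so no separate rebuild pass is needed.
      void IterateAndUpdate(const std::function<bool(Address)>& visit) {
        for (auto it = slots_.begin(); it != slots_.end();) {
          it = visit(*it) ? std::next(it) : slots_.erase(it);
        }
      }
    };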
@@ -1946,8 +1932,6 @@ Address Heap::DoScavenge(ObjectVisitor* scavenge_visitor,
   // Promote and process all the to-be-promoted objects.
   {
-    StoreBufferRebuildScope scope(this, store_buffer(),
-                                  &ScavengeStoreBufferCallback);
     while (!promotion_queue()->is_empty()) {
       HeapObject* target;
       int size;
@@ -4495,8 +4479,7 @@ void Heap::IterateAndMarkPointersToFromSpace(HeapObject* object, Address start,
       if (InNewSpace(new_target)) {
         SLOW_DCHECK(Heap::InToSpace(new_target));
         SLOW_DCHECK(new_target->IsHeapObject());
-        store_buffer_.EnterDirectlyIntoStoreBuffer(
-            reinterpret_cast<Address>(slot));
+        store_buffer_.Mark(reinterpret_cast<Address>(slot));
       }
       SLOW_DCHECK(!MarkCompactCollector::IsOnEvacuationCandidate(new_target));
     } else if (record_slots &&
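
The rename from EnterDirectlyIntoStoreBuffer to Mark tracks the new data
structure: instead of appending the address to a global buffer that is later
compacted and de-duplicated, the slot is flagged in a set owned by its page,
so recording the same slot twice is naturally a no-op. A sketch under that
assumption (page size and names are illustrative, not V8's constants):

    #include <bitset>
    #include <cstdint>

    constexpr uintptr_t kPageSize = uintptr_t{1} << 19;  // assumed 512 KB pages
    constexpr uintptr_t kPointerSize = sizeof(void*);

    // One bit per pointer-aligned slot on a page.
    struct PageSlotSet {
      std::bitset<kPageSize / kPointerSize> bits_;

      void Mark(uintptr_t slot_address) {
        // Setting an already-set bit is a no-op, so duplicates cost nothing.
        uintptr_t offset_in_page = slot_address & (kPageSize - 1);
        bits_.set(offset_in_page / kPointerSize);
      }
    };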
@@ -6106,10 +6089,9 @@ void Heap::FilterStoreBufferEntriesOnAboutToBeFreedPages() {
   MemoryChunk* chunk;
   for (chunk = chunks_queued_for_free_; chunk != NULL; chunk = next) {
     next = chunk->next_chunk();
+    chunk->ReleaseOldToNewSlots();
  -- Hannes Payer (out of office), 2016/01/20 19:43:00:
     They should be released in MemoryChunk::ReleaseAll
  -- ulan, 2016/01/28 19:07:22:
     Done.
     chunk->SetFlag(MemoryChunk::ABOUT_TO_BE_FREED);
  -- Hannes Payer (out of office), 2016/01/20 19:43:00:
     ABOUT_TO_BE_FREED should not be needed anymore.
  -- ulan, 2016/01/28 19:07:21:
     Done.
   }
-  store_buffer()->Compact();
-  store_buffer()->Filter(MemoryChunk::ABOUT_TO_BE_FREED);
 }
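
Once each page owns its slot set, a page that is about to be freed can drop
its stale old-to-new entries directly, which is what makes the heap-wide
Compact()/Filter(MemoryChunk::ABOUT_TO_BE_FREED) pass redundant; per the
review comments above, the release later moved into the chunk's own teardown
and the flag became unnecessary. A sketch of that ownership model (stand-in
types, not V8's MemoryChunk):

    #include <memory>

    struct SlotSet { /* per-page slot bitmap, as sketched earlier */ };

    struct MemoryChunkSketch {
      std::unique_ptr<SlotSet> old_to_new_slots_;

      void ReleaseOldToNewSlots() { old_to_new_slots_.reset(); }

      // Freeing the chunk releases its recorded slots with it, so no
      // global store-buffer pass has to filter entries for dead pages.
      ~MemoryChunkSketch() { ReleaseOldToNewSlots(); }
    };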