Index: runtime/vm/heap.cc
diff --git a/runtime/vm/heap.cc b/runtime/vm/heap.cc
index f73af75a691500ace737cf7f002409c86a3f3011..6f940d3bd06051a199f15d578c2297e82297e2e2 100644
--- a/runtime/vm/heap.cc
+++ b/runtime/vm/heap.cc
@@ -63,13 +63,13 @@ uword Heap::AllocateNew(intptr_t size) {
   ASSERT(Thread::Current()->no_safepoint_scope_depth() == 0);
   // Currently, only the Dart thread may allocate in new space.
   isolate()->AssertCurrentThreadIsMutator();
-  uword addr = new_space_.TryAllocate(size);
+  uword addr = new_space_.TryAllocateInTLAB(Thread::Current(), size);
   if (addr == 0) {
     // This call to CollectGarbage might end up "reusing" a collection spawned
     // from a different thread and will be racing to allocate the requested
     // memory with other threads being released after the collection.
     CollectGarbage(kNew);
-    addr = new_space_.TryAllocate(size);
+    addr = new_space_.TryAllocateInTLAB(Thread::Current(), size);
     if (addr == 0) {
       return AllocateOld(size, HeapPage::kData);
     }
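This hunk moves the new-space fast path onto a thread-local allocation buffer (TLAB): instead of bumping the Scavenger's shared top, the mutator bumps a top pointer owned by its Thread, and only falls back to a scavenge (and then to old space) when its buffer is exhausted. A minimal standalone sketch of that bump-pointer path; the Tlab type and its member names are illustrative only, not the VM's actual API:

    #include <cstdint>

    // Simplified model of a thread-local allocation buffer. In the VM the
    // top/end words live on the Thread, so the fast path needs no lock:
    // each mutator bumps its own pointer.
    struct Tlab {
      uintptr_t top = 0;  // next free address in this thread's buffer
      uintptr_t end = 0;  // first address past the buffer

      // Bump-pointer allocation: succeeds iff the request fits in what
      // remains of the buffer, otherwise returns 0 like TryAllocateInTLAB.
      uintptr_t TryAllocate(intptr_t size) {
        if (static_cast<uintptr_t>(size) <= end - top) {
          uintptr_t result = top;
          top += size;  // the only state touched on the fast path
          return result;
        }
        return 0;  // exhausted: caller collects garbage or goes to old space
      }
    };

The diff's retry structure mirrors this contract: a zero return triggers CollectGarbage(kNew), one more TLAB attempt, and finally the old-space slow path.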
@@ -248,11 +248,16 @@ HeapIterationScope::~HeapIterationScope() {
 void Heap::IterateObjects(ObjectVisitor* visitor) const {
   // The visitor must not allocate from the heap.
   NoSafepointScope no_safepoint_scope_;
-  new_space_.VisitObjects(visitor);
+  IterateNewObjects(visitor);
   IterateOldObjects(visitor);
 }
 
 
+void Heap::IterateNewObjects(ObjectVisitor* visitor) const {
+  new_space_.VisitObjects(visitor);
+}
+
+
 void Heap::IterateOldObjects(ObjectVisitor* visitor) const {
   HeapIterationScope heap_iteration_scope;
   old_space_.VisitObjects(visitor);
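Factoring out IterateNewObjects gives callers a way to visit only scavenger-managed objects, mirroring the existing IterateOldObjects. A reduced sketch of the visitor shape involved; ObjectVisitor is cut down to a single callback here, and CountingVisitor is a hypothetical client, not a VM class:

    #include <cstdint>

    // Reduced form of the visitor interface: the heap walks a space and
    // hands each object it finds to the visitor.
    class ObjectVisitor {
     public:
      virtual ~ObjectVisitor() {}
      virtual void VisitObject(void* obj) = 0;
    };

    // Hypothetical client: counts the objects in whichever space it is
    // run over, e.g. heap->IterateNewObjects(&counter).
    class CountingVisitor : public ObjectVisitor {
     public:
      void VisitObject(void*) override { ++count_; }
      intptr_t count() const { return count_; }

     private:
      intptr_t count_ = 0;
    };

A client that previously had to pay for the full IterateObjects walk can now run such a visitor over new space alone.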
@@ -372,7 +377,7 @@ void Heap::EvacuateNewSpace(Thread* thread, GCReason reason) {
   VMTagScope tagScope(thread, VMTag::kGCNewSpaceTagId);
   TIMELINE_FUNCTION_GC_DURATION(thread, "EvacuateNewGeneration");
   NOT_IN_PRODUCT(UpdateClassHeapStatsBeforeGC(kNew));
-  new_space_.Evacuate();
+  new_space_.Evacuate(isolate()->mutator_thread());
   NOT_IN_PRODUCT(isolate()->class_table()->UpdatePromoted());
   RecordAfterGC(kNew);
   PrintStats();
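Evacuate now receives the mutator thread because, with TLABs, part of new space's live region is described only by that thread's allocation pointer: objects bump-allocated since the last flush sit above the space's recorded top. A sketch of the idea under that assumption; MutatorThread, Space, and the field names are invented for illustration:

    #include <cstdint>

    // Hypothetical mutator state: the only up-to-date record of how far
    // the TLAB has been bumped.
    struct MutatorThread {
      uintptr_t tlab_top;
    };

    // Sketch of why evacuation needs the mutator thread: the space's own
    // top_ is stale while a TLAB is active, so the walk over live objects
    // must first adopt the thread's view of the allocation frontier.
    struct Space {
      uintptr_t first;  // start of the region to evacuate
      uintptr_t top_;   // last value flushed back from the mutator

      void Evacuate(MutatorThread* mutator) {
        top_ = mutator->tlab_top;  // adopt the true frontier
        // ... walk objects in [first, top_) and copy them out ...
      }
    };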
@@ -509,26 +514,6 @@ void Heap::WriteProtect(bool read_only) {
 }
 
 
-intptr_t Heap::TopOffset(Heap::Space space) {
-  if (space == kNew) {
-    return OFFSET_OF(Heap, new_space_) + Scavenger::top_offset();
-  } else {
-    ASSERT(space == kOld);
-    return OFFSET_OF(Heap, old_space_) + PageSpace::top_offset();
-  }
-}
-
-
-intptr_t Heap::EndOffset(Heap::Space space) {
-  if (space == kNew) {
-    return OFFSET_OF(Heap, new_space_) + Scavenger::end_offset();
-  } else {
-    ASSERT(space == kOld);
-    return OFFSET_OF(Heap, old_space_) + PageSpace::end_offset();
-  }
-}
-
-
 void Heap::Init(Isolate* isolate,
                 intptr_t max_new_gen_words,
                 intptr_t max_old_gen_words,
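Heap::TopOffset and Heap::EndOffset existed so generated allocation stubs could load the heap's top/end words at a fixed offset from the Heap object. Once the allocation frontier lives on the Thread, which compiled code already holds in a register, those heap-relative offsets are dead. A sketch of the replacement pattern, with the VM's OFFSET_OF macro reduced to the standard offsetof; this Thread is a stand-in, though the real VM exposes comparable accessors on its own Thread:

    #include <cstddef>
    #include <cstdint>

    // Stand-in for the VM thread: the TLAB bounds are plain fields, so
    // generated code can address them as [THR + constant].
    struct Thread {
      uintptr_t top;  // TLAB allocation pointer
      uintptr_t end;  // TLAB limit

      // Offsets the compiler bakes into allocation fast paths, taking
      // over the role of the deleted Heap::TopOffset/Heap::EndOffset.
      static intptr_t top_offset() { return offsetof(Thread, top); }
      static intptr_t end_offset() { return offsetof(Thread, end); }
    };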
@@ -607,6 +592,10 @@ bool Heap::Verify(MarkExpectation mark_expectation) const {
 
 bool Heap::VerifyGC(MarkExpectation mark_expectation) const {
   StackZone stack_zone(Thread::Current());
+
+  // Synchronize the new space's top_ with the mutator thread's TLAB top_.
+  new_space_.FlushTLS();
+
   ObjectSet* allocated_set =
       CreateAllocatedObjectSet(stack_zone.GetZone(), mark_expectation);
   VerifyPointersVisitor visitor(isolate(), allocated_set);
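VerifyGC builds its allocated-object set by walking the spaces, and that walk trusts the scavenger's top_. While the mutator owns a TLAB this value is stale, so the verifier first writes the thread's top back into the space. A standalone sketch of that synchronization, assuming a single mutator; note the real FlushTLS takes no arguments and presumably locates the thread itself, whereas the sketch passes it explicitly to stay self-contained:

    #include <cstdint>

    struct MutatorThread {
      uintptr_t top;  // TLAB allocation pointer, bumped on every allocation
    };

    struct Scavenger {
      // mutable so a const verification path can still synchronize it.
      mutable uintptr_t top_ = 0;

      // Fold the mutator's TLAB frontier back into the space so iteration
      // and verification see every bump-allocated object.
      void FlushTLS(const MutatorThread* mutator) const {
        top_ = mutator->top;
      }
    };

Without this flush, objects allocated since the TLAB was handed out would sit above the space's recorded top and VerifyPointersVisitor would report pointers to "unallocated" memory.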