Index: runtime/vm/scavenger.cc
diff --git a/runtime/vm/scavenger.cc b/runtime/vm/scavenger.cc
index 34810a109fe21f4382c01d7ad88892b38b329592..e20216c0fe731d505bdac767efc819ad14bb98ff 100644
--- a/runtime/vm/scavenger.cc
+++ b/runtime/vm/scavenger.cc
@@ -138,7 +138,7 @@ class ScavengerVisitor : public ObjectPointerVisitor {
      if (scavenger_->survivor_end_ <= raw_addr) {
        // Not a survivor of a previous scavenge. Just copy the object into the
        // to space.
-       new_addr = scavenger_->TryAllocateGC(size);
+       new_addr = scavenger_->TryAllocateGC(size);
        NOT_IN_PRODUCT(class_table->UpdateLiveNew(cid, size));
      } else {
        // TODO(iposva): Experiment with less aggressive promotion. For example
@@ -158,7 +158,7 @@ class ScavengerVisitor : public ObjectPointerVisitor {
        } else {
          // Promotion did not succeed. Copy into the to space instead.
          scavenger_->failed_to_promote_ = true;
-         new_addr = scavenger_->TryAllocate(size);
+         new_addr = scavenger_->TryAllocateGC(size);
          NOT_IN_PRODUCT(class_table->UpdateLiveNew(cid, size));
        }
      }
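
Note on the rename above: switching from TryAllocate to TryAllocateGC suggests scavenge-time copying now goes through a GC-internal bump-allocation path that advances the scavenger's own top_/end_ cursor rather than the mutator thread's thread-local top/end. The declaration lives in scavenger.h, which is not part of this excerpt, so the following is only a sketch of what such a path typically looks like, not the patch's actual code:

    uword Scavenger::TryAllocateGC(intptr_t size) {
      ASSERT(Utils::IsAligned(size, kObjectAlignment));
      // Bump-allocate against the scavenger's own cursor rather than the
      // mutator thread's thread-local top/end.
      uword result = top_;
      intptr_t remaining = end_ - top_;
      if (remaining < size) {
        return 0;  // To-space exhausted; the caller handles the failure.
      }
      top_ += size;
      return result;
    }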
@@ -340,7 +340,8 @@ Scavenger::Scavenger(Heap* heap,
       gc_time_micros_(0),
       collections_(0),
       external_size_(0),
-      failed_to_promote_(false) {
+      failed_to_promote_(false),
+      space_lock_(new Mutex()) {
   // Verify assumptions about the first word in objects which the scavenger is
   // going to use for forwarding pointers.
   ASSERT(Object::tags_offset() == 0);
@@ -372,6 +373,7 @@ Scavenger::Scavenger(Heap* heap,
 Scavenger::~Scavenger() {
   ASSERT(!scavenging_);
   to_->Delete();
+  delete space_lock_;
 }
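
The new space_lock_ mutex is created in the constructor and freed in the destructor, presumably to serialize cursor updates once both the mutator (via its TLS top/end) and GC-internal code can touch the semispace. Where the lock is actually acquired is not visible in this excerpt; the snippet below only illustrates the VM's scoped MutexLocker idiom with a hypothetical accessor:

    // Hypothetical accessor, not code from this CL.
    intptr_t Scavenger::UsedLocked() const {
      MutexLocker ml(space_lock_);       // Scoped acquire/release of space_lock_.
      return top_ - FirstObjectStart();  // Read the cursor while holding the lock.
    }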
@@ -394,6 +396,12 @@ SemiSpace* Scavenger::Prologue(Isolate* isolate, bool invoke_api_callbacks) {
     (isolate->gc_prologue_callback())();
   }
   isolate->PrepareForGC();
+
+  Thread* thread = Thread::Current();
rmacnak
2017/07/12 17:10:22
Why not always use `isolate->mutator_thread()`?
danunez
2017/07/12 18:23:38
Looks like I missed one.
+  if (!thread->IsMutatorThread()) {
+    thread = isolate->mutator_thread();
+  }
+
   // Flip the two semi-spaces so that to_ is always the space for allocating
   // objects.
   SemiSpace* from = to_;
@@ -411,6 +419,12 @@ SemiSpace* Scavenger::Prologue(Isolate* isolate, bool invoke_api_callbacks) {
   top_ = FirstObjectStart();
   resolved_top_ = top_;
   end_ = to_->end();
+
+  if (thread->heap() == heap_) {
rmacnak
2017/07/12 17:10:22
We should never scavenge another isolate's heap. (
danunez
2017/07/12 18:23:38
As discussed, will replace with a check if the mut
+    thread->set_top(top_);
+    thread->set_end(end_);
+  }
+
   return from;
 }
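
With this CL the mutator allocates new-space objects against the top/end values cached on Thread, so after the semispaces are flipped those cached values would still point into the old from-space unless the prologue refreshes them, as the hunk above does. A rough sketch of the mutator-side fast path this implies (an assumption for illustration; the real allocation stubs live outside this file):

    // Hypothetical fast path, assuming TLS-based new-space allocation; not the
    // VM's actual allocation stub.
    uword AllocateNewFromTLS(Thread* thread, intptr_t size) {
      uword result = thread->top();
      if ((thread->end() - result) >= static_cast<uword>(size)) {
        thread->set_top(result + size);  // A stale top/end here would hand out
        return result;                   // memory inside the retired from-space.
      }
      return 0;  // Slow path: ask the heap/scavenger for a fresh chunk.
    }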
@@ -420,6 +434,18 @@ void Scavenger::Epilogue(Isolate* isolate,
                          bool invoke_api_callbacks) {
   // All objects in the to space have been copied from the from space at this
   // moment.
+
+  Thread* thread = Thread::Current();
+
+  if (!thread->IsMutatorThread()) {
+    thread = isolate->mutator_thread();
+  }
+
+  if (thread->heap() == heap_) {
rmacnak
2017/07/12 17:10:22
We should never scavenge another isolate's heap. (
danunez
2017/07/12 18:23:38
Done.
+    thread->set_top(top_);
+    thread->set_end(end_);
+  }
+
   double avg_frac = stats_history_.Get(0).PromoCandidatesSuccessFraction();
   if (stats_history_.Size() >= 2) {
     // Previous scavenge is only given half as much weight.
@@ -556,6 +582,7 @@ void Scavenger::IterateWeakRoots(Isolate* isolate, HandleVisitor* visitor) {
 void Scavenger::ProcessToSpace(ScavengerVisitor* visitor) {
   // Iterate until all work has been drained.
+
rmacnak
2017/07/12 17:10:22
Spurious whitespace changes.
danunez
2017/07/12 18:23:38
Done.
   while ((resolved_top_ < top_) || PromotedStackHasMore()) {
     while (resolved_top_ < top_) {
       RawObject* raw_obj = RawObject::FromAddr(resolved_top_);
@@ -608,6 +635,7 @@ void Scavenger::ProcessToSpace(ScavengerVisitor* visitor) {
       } else {
         EnqueueWeakProperty(cur_weak);
       }
+
       // Advance to next weak property in the queue.
       cur_weak = reinterpret_cast<RawWeakProperty*>(next_weak);
     }
@@ -732,7 +760,18 @@ void Scavenger::ProcessWeakReferences() {
 }
+void Scavenger::FlushTLS() const {
+  if (heap_ != NULL) {
+    Thread* mutator_thread = heap_->isolate()->mutator_thread();
+
+    if (mutator_thread->heap() == heap_) {
+      mutator_thread->heap()->new_space()->set_top(mutator_thread->top());
+    }
+  }
+}
+
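
FlushTLS writes the mutator thread's thread-local top back into the scavenger before the new space is walked; without it, objects bump-allocated through the TLS cursor since the last sync would sit beyond a stale top_, and every walk below (VisitObjectPointers, VisitObjects, FindObject) stops at top_. A sketch of the dependency, paraphrasing the visitor pattern used in this file:

    // Sketch only; the real visitor bodies follow in the hunks below.
    void Scavenger::VisitObjects(ObjectVisitor* visitor) const {
      FlushTLS();  // Make top_ reflect the mutator's thread-local cursor.
      uword cur = FirstObjectStart();
      while (cur < top_) {  // A stale top_ would end the walk too early.
        RawObject* raw_obj = RawObject::FromAddr(cur);
        visitor->VisitObject(raw_obj);
        cur += raw_obj->Size();
      }
    }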
 void Scavenger::VisitObjectPointers(ObjectPointerVisitor* visitor) const {
+  FlushTLS();
   uword cur = FirstObjectStart();
   while (cur < top_) {
     RawObject* raw_obj = RawObject::FromAddr(cur);
@@ -742,6 +781,7 @@ void Scavenger::VisitObjectPointers(ObjectPointerVisitor* visitor) const {
 void Scavenger::VisitObjects(ObjectVisitor* visitor) const {
+  FlushTLS();
   uword cur = FirstObjectStart();
   while (cur < top_) {
     RawObject* raw_obj = RawObject::FromAddr(cur);
@@ -758,7 +798,9 @@ void Scavenger::AddRegionsToObjectSet(ObjectSet* set) const {
 RawObject* Scavenger::FindObject(FindObjectVisitor* visitor) const {
   ASSERT(!scavenging_);
+  FlushTLS();
   uword cur = FirstObjectStart();
+
   if (visitor->VisitRange(cur, top_)) {
     while (cur < top_) {
       RawObject* raw_obj = RawObject::FromAddr(cur);
@@ -768,7 +810,6 @@ RawObject* Scavenger::FindObject(FindObjectVisitor* visitor) const {
       }
       cur = next;
     }
-    ASSERT(cur == top_);
   }
   return Object::null();
 }
@@ -910,7 +951,7 @@ void Scavenger::FreeExternal(intptr_t size) {
 }
-void Scavenger::Evacuate() {
+void Scavenger::Evacuate(Thread* thread) {
rmacnak
2017/07/12 17:10:22
It looks like all callers pass isolate()->mutator_
danunez
2017/07/12 18:23:38
You are right. I will modify this function to grab
   // We need a safepoint here to prevent allocation right before or right after
   // the scavenge.
   // The former can introduce an object that we might fail to collect.
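
The signature change hands Evacuate the thread whose thread-local top becomes the forced survivor_end_ (see the next hunk). Per the reviewer's note, the existing callers already pass the isolate's mutator thread, so a call site presumably resembles the hypothetical line below; the author's reply indicates a later patch set would drop the parameter and look the mutator thread up inside Evacuate instead.

    // Hypothetical call site mirroring the reviewer's observation; the actual
    // callers are outside this excerpt.
    isolate->heap()->new_space()->Evacuate(isolate->mutator_thread());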
@@ -921,6 +962,11 @@ void Scavenger::Evacuate() {
   // Forces the next scavenge to promote all the objects in the new space.
   survivor_end_ = top_;
+
+  if (thread->heap() != NULL) {
rmacnak
2017/07/12 17:10:22
// Null in some unit tests. (Presumably)
danunez
2017/07/12 18:23:38
As stated above, will replace with a function that
+    survivor_end_ = thread->top();
+  }
+
   Scavenge();
   // It is possible for objects to stay in the new space