OLD | NEW |
---|---|
1 // Copyright (c) 2011, the Dart project authors. Please see the AUTHORS file | 1 // Copyright (c) 2011, the Dart project authors. Please see the AUTHORS file |
2 // for details. All rights reserved. Use of this source code is governed by a | 2 // for details. All rights reserved. Use of this source code is governed by a |
3 // BSD-style license that can be found in the LICENSE file. | 3 // BSD-style license that can be found in the LICENSE file. |
4 | 4 |
5 #include "vm/scavenger.h" | 5 #include "vm/scavenger.h" |
6 | 6 |
7 #include "vm/dart.h" | 7 #include "vm/dart.h" |
8 #include "vm/dart_api_state.h" | 8 #include "vm/dart_api_state.h" |
9 #include "vm/isolate.h" | 9 #include "vm/isolate.h" |
10 #include "vm/lockers.h" | 10 #include "vm/lockers.h" |
(...skipping 120 matching lines...)
131 // Get the new location of the object. | 131 // Get the new location of the object. |
132 new_addr = ForwardedAddr(header); | 132 new_addr = ForwardedAddr(header); |
133 } else { | 133 } else { |
134 intptr_t size = raw_obj->Size(); | 134 intptr_t size = raw_obj->Size(); |
135 NOT_IN_PRODUCT(intptr_t cid = raw_obj->GetClassId()); | 135 NOT_IN_PRODUCT(intptr_t cid = raw_obj->GetClassId()); |
136 NOT_IN_PRODUCT(ClassTable* class_table = isolate()->class_table()); | 136 NOT_IN_PRODUCT(ClassTable* class_table = isolate()->class_table()); |
137 // Check whether object should be promoted. | 137 // Check whether object should be promoted. |
138 if (scavenger_->survivor_end_ <= raw_addr) { | 138 if (scavenger_->survivor_end_ <= raw_addr) { |
139 // Not a survivor of a previous scavenge. Just copy the object into the | 139 // Not a survivor of a previous scavenge. Just copy the object into the |
140 // to space. | 140 // to space. |
141 new_addr = scavenger_->TryAllocate(size); | 141 new_addr = scavenger_->TryAllocate(size); |
rmacnak
2017/07/05 17:39:52
Can we use a separate allocation function here? Du
danunez
2017/07/05 18:12:55
I will add a function to do allocation using the t
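A minimal sketch of the dedicated allocation helper requested above, assuming scavenge-time copying should bump the scavenger's own top_ directly rather than go through the mutator thread's TLAB. The name TryAllocateScavenge and the return-0-on-failure convention are hypothetical, mirroring the TryAllocate call sites in this patch:

  // Hypothetical helper: bump-allocate in to_ during a scavenge, bypassing
  // the mutator thread's cached top/end. Returns 0 on failure, matching the
  // checks at the call sites in this patch.
  uword Scavenger::TryAllocateScavenge(intptr_t size) {
    ASSERT(Utils::IsAligned(size, kObjectAlignment));
    ASSERT(scavenging_);
    const intptr_t remaining = end_ - top_;
    if (remaining < size) {
      return 0;  // Caller promotes the object instead, or the scavenge fails.
    }
    const uword result = top_;
    ASSERT(to_->Contains(result));
    top_ += size;
    ASSERT(to_->Contains(top_) || (top_ == to_->end()));
    return result;
  }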
142 NOT_IN_PRODUCT(class_table->UpdateLiveNew(cid, size)); | 142 NOT_IN_PRODUCT(class_table->UpdateLiveNew(cid, size)); |
143 } else { | 143 } else { |
144 // TODO(iposva): Experiment with less aggressive promotion. For example | 144 // TODO(iposva): Experiment with less aggressive promotion. For example |
145 // a coin toss determines if an object is promoted or whether it should | 145 // a coin toss determines if an object is promoted or whether it should |
146 // survive in this generation. | 146 // survive in this generation. |
147 // | 147 // |
148 // This object is a survivor of a previous scavenge. Attempt to promote | 148 // This object is a survivor of a previous scavenge. Attempt to promote |
149 // the object. | 149 // the object. |
150 new_addr = | 150 new_addr = |
151 page_space_->TryAllocatePromoLocked(size, PageSpace::kForceGrowth); | 151 page_space_->TryAllocatePromoLocked(size, PageSpace::kForceGrowth); |
152 if (new_addr != 0) { | 152 if (new_addr != 0) { |
153 // If promotion succeeded then we need to remember it so that it can | 153 // If promotion succeeded then we need to remember it so that it can |
154 // be traversed later. | 154 // be traversed later. |
155 scavenger_->PushToPromotedStack(new_addr); | 155 scavenger_->PushToPromotedStack(new_addr); |
156 bytes_promoted_ += size; | 156 bytes_promoted_ += size; |
157 NOT_IN_PRODUCT(class_table->UpdateAllocatedOld(cid, size)); | 157 NOT_IN_PRODUCT(class_table->UpdateAllocatedOld(cid, size)); |
158 } else { | 158 } else { |
159 // Promotion did not succeed. Copy into the to space instead. | 159 // Promotion did not succeed. Copy into the to space instead. |
160 new_addr = scavenger_->TryAllocate(size); | 160 new_addr = scavenger_->TryAllocate(size); |
rmacnak
2017/07/05 17:39:52
And here.
danunez
2017/07/05 18:12:55
Ditto.
161 NOT_IN_PRODUCT(class_table->UpdateLiveNew(cid, size)); | 161 NOT_IN_PRODUCT(class_table->UpdateLiveNew(cid, size)); |
162 } | 162 } |
163 } | 163 } |
164 // During a scavenge we always succeed to at least copy all of the | 164 // During a scavenge we always succeed to at least copy all of the |
165 // current objects to the to space. | 165 // current objects to the to space. |
166 ASSERT(new_addr != 0); | 166 ASSERT(new_addr != 0); |
167 // Copy the object to the new location. | 167 // Copy the object to the new location. |
168 memmove(reinterpret_cast<void*>(new_addr), | 168 memmove(reinterpret_cast<void*>(new_addr), |
169 reinterpret_cast<void*>(raw_addr), size); | 169 reinterpret_cast<void*>(raw_addr), size); |
170 // Remember forwarding address. | 170 // Remember forwarding address. |
(...skipping 160 matching lines...)
331 Scavenger::Scavenger(Heap* heap, | 331 Scavenger::Scavenger(Heap* heap, |
332 intptr_t max_semi_capacity_in_words, | 332 intptr_t max_semi_capacity_in_words, |
333 uword object_alignment) | 333 uword object_alignment) |
334 : heap_(heap), | 334 : heap_(heap), |
335 max_semi_capacity_in_words_(max_semi_capacity_in_words), | 335 max_semi_capacity_in_words_(max_semi_capacity_in_words), |
336 object_alignment_(object_alignment), | 336 object_alignment_(object_alignment), |
337 scavenging_(false), | 337 scavenging_(false), |
338 delayed_weak_properties_(NULL), | 338 delayed_weak_properties_(NULL), |
339 gc_time_micros_(0), | 339 gc_time_micros_(0), |
340 collections_(0), | 340 collections_(0), |
341 external_size_(0) { | 341 external_size_(0), |
342 space_lock_(new Mutex()) { | |
342 // Verify assumptions about the first word in objects which the scavenger is | 343 // Verify assumptions about the first word in objects which the scavenger is |
343 // going to use for forwarding pointers. | 344 // going to use for forwarding pointers. |
344 ASSERT(Object::tags_offset() == 0); | 345 ASSERT(Object::tags_offset() == 0); |
345 | 346 |
346 // Set initial size resulting in a total of three different levels. | 347 // Set initial size resulting in a total of three different levels. |
347 const intptr_t initial_semi_capacity_in_words = | 348 const intptr_t initial_semi_capacity_in_words = |
348 max_semi_capacity_in_words / | 349 max_semi_capacity_in_words / |
349 (FLAG_new_gen_growth_factor * FLAG_new_gen_growth_factor); | 350 (FLAG_new_gen_growth_factor * FLAG_new_gen_growth_factor); |
350 | 351 |
351 const intptr_t kVmNameSize = 128; | 352 const intptr_t kVmNameSize = 128; |
(...skipping 11 matching lines...)
363 survivor_end_ = FirstObjectStart(); | 364 survivor_end_ = FirstObjectStart(); |
364 | 365 |
365 UpdateMaxHeapCapacity(); | 366 UpdateMaxHeapCapacity(); |
366 UpdateMaxHeapUsage(); | 367 UpdateMaxHeapUsage(); |
367 } | 368 } |
368 | 369 |
369 | 370 |
370 Scavenger::~Scavenger() { | 371 Scavenger::~Scavenger() { |
371 ASSERT(!scavenging_); | 372 ASSERT(!scavenging_); |
372 to_->Delete(); | 373 to_->Delete(); |
374 delete space_lock_; | |
373 } | 375 } |
374 | 376 |
375 | 377 |
376 intptr_t Scavenger::NewSizeInWords(intptr_t old_size_in_words) const { | 378 intptr_t Scavenger::NewSizeInWords(intptr_t old_size_in_words) const { |
377 if (stats_history_.Size() == 0) { | 379 if (stats_history_.Size() == 0) { |
378 return old_size_in_words; | 380 return old_size_in_words; |
379 } | 381 } |
380 double garbage = stats_history_.Get(0).GarbageFraction(); | 382 double garbage = stats_history_.Get(0).GarbageFraction(); |
381 if (garbage < (FLAG_new_gen_garbage_threshold / 100.0)) { | 383 if (garbage < (FLAG_new_gen_garbage_threshold / 100.0)) { |
382 return Utils::Minimum(max_semi_capacity_in_words_, | 384 return Utils::Minimum(max_semi_capacity_in_words_, |
383 old_size_in_words * FLAG_new_gen_growth_factor); | 385 old_size_in_words * FLAG_new_gen_growth_factor); |
384 } else { | 386 } else { |
385 return old_size_in_words; | 387 return old_size_in_words; |
386 } | 388 } |
387 } | 389 } |
388 | 390 |
389 | 391 |
390 SemiSpace* Scavenger::Prologue(Isolate* isolate, bool invoke_api_callbacks) { | 392 SemiSpace* Scavenger::Prologue(Isolate* isolate, bool invoke_api_callbacks) { |
391 if (invoke_api_callbacks && (isolate->gc_prologue_callback() != NULL)) { | 393 if (invoke_api_callbacks && (isolate->gc_prologue_callback() != NULL)) { |
392 (isolate->gc_prologue_callback())(); | 394 (isolate->gc_prologue_callback())(); |
393 } | 395 } |
394 isolate->PrepareForGC(); | 396 isolate->PrepareForGC(); |
397 | |
398 Thread* thread = Thread::Current(); | |
rmacnak
2017/07/05 17:39:53
How about
Thread* mutator = isolate->mutator_thread();
danunez
2017/07/05 18:12:55
Done.
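Completed from context, the suggestion above is to look the mutator thread up directly on the isolate instead of probing Thread::Current(). A sketch of how new lines 398-401 would then collapse (assuming an isolate can lack an attached mutator thread at GC time, hence the NULL guards at later uses):

  // Reviewer-suggested form: a direct lookup replaces the Thread::Current()
  // fallback. The thread->heap() != NULL checks this patch performs later
  // would remain as written.
  Thread* mutator = isolate->mutator_thread();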
399 if (!thread->IsMutatorThread()) { | |
400 thread = isolate->mutator_thread(); | |
401 } | |
402 | |
395 // Flip the two semi-spaces so that to_ is always the space for allocating | 403 // Flip the two semi-spaces so that to_ is always the space for allocating |
396 // objects. | 404 // objects. |
397 SemiSpace* from = to_; | 405 SemiSpace* from = to_; |
398 | 406 |
399 const intptr_t kVmNameSize = 128; | 407 const intptr_t kVmNameSize = 128; |
400 char vm_name[kVmNameSize]; | 408 char vm_name[kVmNameSize]; |
401 Heap::RegionName(heap_, Heap::kNew, vm_name, kVmNameSize); | 409 Heap::RegionName(heap_, Heap::kNew, vm_name, kVmNameSize); |
402 to_ = SemiSpace::New(NewSizeInWords(from->size_in_words()), vm_name); | 410 to_ = SemiSpace::New(NewSizeInWords(from->size_in_words()), vm_name); |
403 if (to_ == NULL) { | 411 if (to_ == NULL) { |
404 // TODO(koda): We could try to recover (collect old space, wait for another | 412 // TODO(koda): We could try to recover (collect old space, wait for another |
405 // isolate to finish scavenge, etc.). | 413 // isolate to finish scavenge, etc.). |
406 OUT_OF_MEMORY(); | 414 OUT_OF_MEMORY(); |
407 } | 415 } |
408 UpdateMaxHeapCapacity(); | 416 UpdateMaxHeapCapacity(); |
409 top_ = FirstObjectStart(); | 417 top_ = FirstObjectStart(); |
410 resolved_top_ = top_; | 418 resolved_top_ = top_; |
411 end_ = to_->end(); | 419 end_ = to_->end(); |
420 | |
421 if (thread->heap() != NULL) { | |
422 thread->set_top_offset(top_); | |
423 thread->set_end_offset(end_); | |
424 } | |
425 | |
412 return from; | 426 return from; |
413 } | 427 } |
414 | 428 |
415 | 429 |
416 void Scavenger::Epilogue(Isolate* isolate, | 430 void Scavenger::Epilogue(Isolate* isolate, |
417 SemiSpace* from, | 431 SemiSpace* from, |
418 bool invoke_api_callbacks) { | 432 bool invoke_api_callbacks) { |
419 // All objects in the to space have been copied from the from space at this | 433 // All objects in the to space have been copied from the from space at this |
420 // moment. | 434 // moment. |
435 | |
436 Thread* thread = Thread::Current(); | |
437 | |
438 if (!thread->IsMutatorThread()) { | |
439 thread = isolate->mutator_thread(); | |
440 } | |
441 uword top = 0; | |
442 uword end = 0; | |
443 | |
444 | |
445 if (thread->heap() != NULL) { | |
446 top = thread->top(); | |
447 ASSERT(thread->end() == end_); | |
448 end = thread->end(); | |
449 } else { | |
450 top = top_; | |
451 end = end_; | |
452 } | |
453 | |
421 double avg_frac = stats_history_.Get(0).PromoCandidatesSuccessFraction(); | 454 double avg_frac = stats_history_.Get(0).PromoCandidatesSuccessFraction(); |
422 if (stats_history_.Size() >= 2) { | 455 if (stats_history_.Size() >= 2) { |
423 // Previous scavenge is only given half as much weight. | 456 // Previous scavenge is only given half as much weight. |
424 avg_frac += 0.5 * stats_history_.Get(1).PromoCandidatesSuccessFraction(); | 457 avg_frac += 0.5 * stats_history_.Get(1).PromoCandidatesSuccessFraction(); |
425 avg_frac /= 1.0 + 0.5; // Normalize. | 458 avg_frac /= 1.0 + 0.5; // Normalize. |
426 } | 459 } |
427 if (avg_frac < (FLAG_early_tenuring_threshold / 100.0)) { | 460 if (avg_frac < (FLAG_early_tenuring_threshold / 100.0)) { |
428 // Remember the limit to which objects have been copied. | 461 // Remember the limit to which objects have been copied. |
429 survivor_end_ = top_; | 462 survivor_end_ = top; |
430 } else { | 463 } else { |
431 // Move survivor end to the end of the to_ space, making all surviving | 464 // Move survivor end to the end of the to_ space, making all surviving |
432 // objects candidates for promotion next time. | 465 // objects candidates for promotion next time. |
433 survivor_end_ = end_; | 466 survivor_end_ = end; |
434 } | 467 } |
435 #if defined(DEBUG) | 468 #if defined(DEBUG) |
436 // We can only safely verify the store buffers from old space if there is no | 469 // We can only safely verify the store buffers from old space if there is no |
437 // concurrent old space task. At the same time we prevent new tasks from | 470 // concurrent old space task. At the same time we prevent new tasks from |
438 // being spawned. | 471 // being spawned. |
439 { | 472 { |
440 PageSpace* page_space = heap_->old_space(); | 473 PageSpace* page_space = heap_->old_space(); |
441 MonitorLocker ml(page_space->tasks_lock()); | 474 MonitorLocker ml(page_space->tasks_lock()); |
442 if (page_space->tasks() == 0) { | 475 if (page_space->tasks() == 0) { |
443 VerifyStoreBufferPointerVisitor verify_store_buffer_visitor(isolate, to_); | 476 VerifyStoreBufferPointerVisitor verify_store_buffer_visitor(isolate, to_); |
(...skipping 103 matching lines...)
547 } | 580 } |
548 | 581 |
549 | 582 |
550 void Scavenger::IterateWeakRoots(Isolate* isolate, HandleVisitor* visitor) { | 583 void Scavenger::IterateWeakRoots(Isolate* isolate, HandleVisitor* visitor) { |
551 isolate->VisitWeakPersistentHandles(visitor); | 584 isolate->VisitWeakPersistentHandles(visitor); |
552 } | 585 } |
553 | 586 |
554 | 587 |
555 void Scavenger::ProcessToSpace(ScavengerVisitor* visitor) { | 588 void Scavenger::ProcessToSpace(ScavengerVisitor* visitor) { |
556 // Iterate until all work has been drained. | 589 // Iterate until all work has been drained. |
557 while ((resolved_top_ < top_) || PromotedStackHasMore()) { | 590 |
558 while (resolved_top_ < top_) { | 591 uword top = 0; |
592 Thread* thread = Thread::Current(); | |
593 | |
594 if (!thread->IsMutatorThread()) { | |
595 thread = Isolate::Current()->mutator_thread(); | |
596 } | |
597 | |
598 if (thread->heap() != NULL) { | |
599 top = thread->top(); | |
600 } else { | |
601 top = top_; | |
602 } | |
603 | |
604 while ((resolved_top_ < top) || PromotedStackHasMore()) { | |
605 while (resolved_top_ < top) { | |
559 RawObject* raw_obj = RawObject::FromAddr(resolved_top_); | 606 RawObject* raw_obj = RawObject::FromAddr(resolved_top_); |
560 intptr_t class_id = raw_obj->GetClassId(); | 607 intptr_t class_id = raw_obj->GetClassId(); |
561 if (class_id != kWeakPropertyCid) { | 608 if (class_id != kWeakPropertyCid) { |
562 resolved_top_ += raw_obj->VisitPointersNonvirtual(visitor); | 609 resolved_top_ += raw_obj->VisitPointersNonvirtual(visitor); |
563 } else { | 610 } else { |
564 RawWeakProperty* raw_weak = reinterpret_cast<RawWeakProperty*>(raw_obj); | 611 RawWeakProperty* raw_weak = reinterpret_cast<RawWeakProperty*>(raw_obj); |
565 resolved_top_ += ProcessWeakProperty(raw_weak, visitor); | 612 resolved_top_ += ProcessWeakProperty(raw_weak, visitor); |
566 } | 613 } |
614 | |
615 if (thread->heap() != NULL) { | |
616 top = thread->top(); | |
617 } else { | |
618 top = top_; | |
619 } | |
567 } | 620 } |
568 { | 621 { |
569 // Visit all the promoted objects and update/scavenge their internal | 622 // Visit all the promoted objects and update/scavenge their internal |
570 // pointers. Potentially this adds more objects to the to space. | 623 // pointers. Potentially this adds more objects to the to space. |
571 while (PromotedStackHasMore()) { | 624 while (PromotedStackHasMore()) { |
572 RawObject* raw_object = RawObject::FromAddr(PopFromPromotedStack()); | 625 RawObject* raw_object = RawObject::FromAddr(PopFromPromotedStack()); |
573 // Resolve or copy all objects referred to by the current object. This | 626 // Resolve or copy all objects referred to by the current object. This |
574 // can potentially push more objects on this stack as well as add more | 627 // can potentially push more objects on this stack as well as add more |
575 // objects to be resolved in the to space. | 628 // objects to be resolved in the to space. |
576 ASSERT(!raw_object->IsRemembered()); | 629 ASSERT(!raw_object->IsRemembered()); |
577 visitor->VisitingOldObject(raw_object); | 630 visitor->VisitingOldObject(raw_object); |
578 raw_object->VisitPointersNonvirtual(visitor); | 631 raw_object->VisitPointersNonvirtual(visitor); |
632 | |
633 if (thread->heap() != NULL) { | |
634 top = thread->top(); | |
635 } else { | |
636 top = top_; | |
637 } | |
579 } | 638 } |
580 visitor->VisitingOldObject(NULL); | 639 visitor->VisitingOldObject(NULL); |
581 } | 640 } |
582 { | 641 { |
583 // Finished this round of scavenging. Process the pending weak properties | 642 // Finished this round of scavenging. Process the pending weak properties |
584 // for which the keys have become reachable. Potentially this adds more | 643 // for which the keys have become reachable. Potentially this adds more |
585 // objects to the to space. | 644 // objects to the to space. |
586 RawWeakProperty* cur_weak = delayed_weak_properties_; | 645 RawWeakProperty* cur_weak = delayed_weak_properties_; |
587 delayed_weak_properties_ = NULL; | 646 delayed_weak_properties_ = NULL; |
588 while (cur_weak != NULL) { | 647 while (cur_weak != NULL) { |
(...skipping 10 matching lines...) Expand all Loading... | |
599 uword raw_addr = RawObject::ToAddr(raw_key); | 658 uword raw_addr = RawObject::ToAddr(raw_key); |
600 ASSERT(visitor->from_->Contains(raw_addr)); | 659 ASSERT(visitor->from_->Contains(raw_addr)); |
601 uword header = *reinterpret_cast<uword*>(raw_addr); | 660 uword header = *reinterpret_cast<uword*>(raw_addr); |
602 // Reset the next pointer in the weak property. | 661 // Reset the next pointer in the weak property. |
603 cur_weak->ptr()->next_ = 0; | 662 cur_weak->ptr()->next_ = 0; |
604 if (IsForwarding(header)) { | 663 if (IsForwarding(header)) { |
605 cur_weak->VisitPointersNonvirtual(visitor); | 664 cur_weak->VisitPointersNonvirtual(visitor); |
606 } else { | 665 } else { |
607 EnqueueWeakProperty(cur_weak); | 666 EnqueueWeakProperty(cur_weak); |
608 } | 667 } |
668 | |
669 if (thread->heap() != NULL) { | |
670 top = thread->top(); | |
671 } else { | |
672 top = top_; | |
673 } | |
674 | |
609 // Advance to next weak property in the queue. | 675 // Advance to next weak property in the queue. |
610 cur_weak = reinterpret_cast<RawWeakProperty*>(next_weak); | 676 cur_weak = reinterpret_cast<RawWeakProperty*>(next_weak); |
611 } | 677 } |
612 } | 678 } |
613 } | 679 } |
614 } | 680 } |
615 | 681 |
616 | 682 |
617 void Scavenger::UpdateMaxHeapCapacity() { | 683 void Scavenger::UpdateMaxHeapCapacity() { |
618 if (heap_ == NULL) { | 684 if (heap_ == NULL) { |
(...skipping 106 matching lines...)
725 | 791 |
726 // Advance to next weak property in the queue. | 792 // Advance to next weak property in the queue. |
727 cur_weak = reinterpret_cast<RawWeakProperty*>(next_weak); | 793 cur_weak = reinterpret_cast<RawWeakProperty*>(next_weak); |
728 } | 794 } |
729 } | 795 } |
730 } | 796 } |
731 | 797 |
732 | 798 |
733 void Scavenger::VisitObjectPointers(ObjectPointerVisitor* visitor) const { | 799 void Scavenger::VisitObjectPointers(ObjectPointerVisitor* visitor) const { |
734 uword cur = FirstObjectStart(); | 800 uword cur = FirstObjectStart(); |
735 while (cur < top_) { | 801 Thread* thread = Thread::Current(); |
802 | |
803 if (!thread->IsMutatorThread()) { | |
rmacnak
2017/07/05 17:39:53
Ditto
danunez
2017/07/05 18:12:55
Done.
804 thread = Isolate::Current()->mutator_thread(); | |
805 } | |
806 | |
807 uword top = CapacityInWords() == 0 ? 0 : thread->top(); | |
808 | |
809 if (thread->heap() == NULL) { | |
810 top = top_; | |
811 } | |
812 | |
813 while (cur < top) { | |
736 RawObject* raw_obj = RawObject::FromAddr(cur); | 814 RawObject* raw_obj = RawObject::FromAddr(cur); |
737 cur += raw_obj->VisitPointers(visitor); | 815 cur += raw_obj->VisitPointers(visitor); |
738 } | 816 } |
739 } | 817 } |
740 | 818 |
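The same which-top bookkeeping is inlined in VisitObjectPointers above and again in VisitObjects and FindObject below; the Ditto/Done exchanges refer to it. A hypothetical consolidation, with ScavengeTop as an assumed name that is not part of this patch:

  // Resolve the current allocation top: prefer the mutator thread's cached
  // TLAB top when that thread has a heap, otherwise fall back to the
  // scavenger's own top_ cursor. Mirrors the inlined pattern in this patch.
  uword Scavenger::ScavengeTop() const {
    Thread* thread = Thread::Current();
    if (!thread->IsMutatorThread()) {
      thread = Isolate::Current()->mutator_thread();
    }
    if (thread->heap() == NULL) {
      return top_;  // No thread-local view of new space; use our cursor.
    }
    return (CapacityInWords() == 0) ? 0 : thread->top();
  }

Each of the three iteration functions could then start with uword top = ScavengeTop(); instead of repeating the lookup.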
741 | 819 |
742 void Scavenger::VisitObjects(ObjectVisitor* visitor) const { | 820 void Scavenger::VisitObjects(ObjectVisitor* visitor) const { |
743 uword cur = FirstObjectStart(); | 821 uword cur = FirstObjectStart(); |
744 while (cur < top_) { | 822 Thread* thread = Thread::Current(); |
823 | |
824 if (!thread->IsMutatorThread()) { | |
rmacnak
2017/07/05 17:39:53
Ditto
danunez
2017/07/05 18:12:55
Done.
825 thread = Isolate::Current()->mutator_thread(); | |
826 } | |
827 | |
828 uword top = CapacityInWords() == 0 ? 0 : thread->top(); | |
829 | |
830 if (thread->heap() == NULL) { | |
831 top = top_; | |
832 } | |
833 | |
834 while (cur < top) { | |
745 RawObject* raw_obj = RawObject::FromAddr(cur); | 835 RawObject* raw_obj = RawObject::FromAddr(cur); |
746 visitor->VisitObject(raw_obj); | 836 visitor->VisitObject(raw_obj); |
747 cur += raw_obj->Size(); | 837 cur += raw_obj->Size(); |
748 } | 838 } |
749 } | 839 } |
750 | 840 |
751 | 841 |
752 void Scavenger::AddRegionsToObjectSet(ObjectSet* set) const { | 842 void Scavenger::AddRegionsToObjectSet(ObjectSet* set) const { |
753 set->AddRegion(to_->start(), to_->end()); | 843 set->AddRegion(to_->start(), to_->end()); |
754 } | 844 } |
755 | 845 |
756 | 846 |
757 RawObject* Scavenger::FindObject(FindObjectVisitor* visitor) const { | 847 RawObject* Scavenger::FindObject(FindObjectVisitor* visitor) const { |
758 ASSERT(!scavenging_); | 848 ASSERT(!scavenging_); |
849 Thread* thread = Thread::Current(); | |
850 | |
851 if (!thread->IsMutatorThread()) { | |
rmacnak
2017/07/05 17:39:52
Ditto
danunez
2017/07/05 18:12:55
Done.
852 thread = Isolate::Current()->mutator_thread(); | |
853 } | |
854 | |
855 uword top = CapacityInWords() == 0 ? 0 : thread->top(); | |
759 uword cur = FirstObjectStart(); | 856 uword cur = FirstObjectStart(); |
760 if (visitor->VisitRange(cur, top_)) { | 857 |
761 while (cur < top_) { | 858 if (thread->heap() == NULL) { |
859 top = top_; | |
860 } | |
861 | |
862 if (visitor->VisitRange(cur, top)) { | |
863 while (cur < top) { | |
762 RawObject* raw_obj = RawObject::FromAddr(cur); | 864 RawObject* raw_obj = RawObject::FromAddr(cur); |
763 uword next = cur + raw_obj->Size(); | 865 uword next = cur + raw_obj->Size(); |
764 if (visitor->VisitRange(cur, next) && raw_obj->FindObject(visitor)) { | 866 if (visitor->VisitRange(cur, next) && raw_obj->FindObject(visitor)) { |
765 return raw_obj; // Found object, return it. | 867 return raw_obj; // Found object, return it. |
766 } | 868 } |
767 cur = next; | 869 cur = next; |
768 } | 870 } |
769 ASSERT(cur == top_); | |
770 } | 871 } |
771 return Object::null(); | 872 return Object::null(); |
772 } | 873 } |
773 | 874 |
774 | 875 |
775 void Scavenger::Scavenge() { | 876 void Scavenger::Scavenge() { |
776 // TODO(cshapiro): Add a decision procedure for determining when the | 877 // TODO(cshapiro): Add a decision procedure for determining when the |
777 // the API callbacks should be invoked. | 878 // the API callbacks should be invoked. |
778 Scavenge(false); | 879 Scavenge(false); |
779 } | 880 } |
(...skipping 119 matching lines...)
899 } | 1000 } |
900 | 1001 |
901 | 1002 |
902 void Scavenger::FreeExternal(intptr_t size) { | 1003 void Scavenger::FreeExternal(intptr_t size) { |
903 ASSERT(size >= 0); | 1004 ASSERT(size >= 0); |
904 external_size_ -= size; | 1005 external_size_ -= size; |
905 ASSERT(external_size_ >= 0); | 1006 ASSERT(external_size_ >= 0); |
906 } | 1007 } |
907 | 1008 |
908 } // namespace dart | 1009 } // namespace dart |