OLD | NEW |
---|---|
1 // Copyright (c) 2011, the Dart project authors. Please see the AUTHORS file | 1 // Copyright (c) 2011, the Dart project authors. Please see the AUTHORS file |
2 // for details. All rights reserved. Use of this source code is governed by a | 2 // for details. All rights reserved. Use of this source code is governed by a |
3 // BSD-style license that can be found in the LICENSE file. | 3 // BSD-style license that can be found in the LICENSE file. |
4 | 4 |
5 #include "vm/scavenger.h" | 5 #include "vm/scavenger.h" |
6 | 6 |
7 #include "vm/dart.h" | 7 #include "vm/dart.h" |
8 #include "vm/dart_api_state.h" | 8 #include "vm/dart_api_state.h" |
9 #include "vm/isolate.h" | 9 #include "vm/isolate.h" |
10 #include "vm/lockers.h" | 10 #include "vm/lockers.h" |
(...skipping 120 matching lines...)
131 // Get the new location of the object. | 131 // Get the new location of the object. |
132 new_addr = ForwardedAddr(header); | 132 new_addr = ForwardedAddr(header); |
133 } else { | 133 } else { |
134 intptr_t size = raw_obj->Size(); | 134 intptr_t size = raw_obj->Size(); |
135 NOT_IN_PRODUCT(intptr_t cid = raw_obj->GetClassId()); | 135 NOT_IN_PRODUCT(intptr_t cid = raw_obj->GetClassId()); |
136 NOT_IN_PRODUCT(ClassTable* class_table = isolate()->class_table()); | 136 NOT_IN_PRODUCT(ClassTable* class_table = isolate()->class_table()); |
137 // Check whether object should be promoted. | 137 // Check whether object should be promoted. |
138 if (scavenger_->survivor_end_ <= raw_addr) { | 138 if (scavenger_->survivor_end_ <= raw_addr) { |
139 // Not a survivor of a previous scavenge. Just copy the object into the | 139 // Not a survivor of a previous scavenge. Just copy the object into the |
140 // to space. | 140 // to space. |
141 new_addr = scavenger_->TryAllocate(size); | 141 new_addr = scavenger_->TryAllocateGC(size); |
142 NOT_IN_PRODUCT(class_table->UpdateLiveNew(cid, size)); | 142 NOT_IN_PRODUCT(class_table->UpdateLiveNew(cid, size)); |
143 } else { | 143 } else { |
144 // TODO(iposva): Experiment with less aggressive promotion. For example | 144 // TODO(iposva): Experiment with less aggressive promotion. For example |
145 // a coin toss determines if an object is promoted or whether it should | 145 // a coin toss determines if an object is promoted or whether it should |
146 // survive in this generation. | 146 // survive in this generation. |
147 // | 147 // |
148 // This object is a survivor of a previous scavenge. Attempt to promote | 148 // This object is a survivor of a previous scavenge. Attempt to promote |
149 // the object. | 149 // the object. |
150 new_addr = | 150 new_addr = |
151 page_space_->TryAllocatePromoLocked(size, PageSpace::kForceGrowth); | 151 page_space_->TryAllocatePromoLocked(size, PageSpace::kForceGrowth); |
152 if (new_addr != 0) { | 152 if (new_addr != 0) { |
153 // If promotion succeeded then we need to remember it so that it can | 153 // If promotion succeeded then we need to remember it so that it can |
154 // be traversed later. | 154 // be traversed later. |
155 scavenger_->PushToPromotedStack(new_addr); | 155 scavenger_->PushToPromotedStack(new_addr); |
156 bytes_promoted_ += size; | 156 bytes_promoted_ += size; |
157 NOT_IN_PRODUCT(class_table->UpdateAllocatedOld(cid, size)); | 157 NOT_IN_PRODUCT(class_table->UpdateAllocatedOld(cid, size)); |
158 } else { | 158 } else { |
159 // Promotion did not succeed. Copy into the to space instead. | 159 // Promotion did not succeed. Copy into the to space instead. |
160 scavenger_->failed_to_promote_ = true; | 160 scavenger_->failed_to_promote_ = true; |
161 new_addr = scavenger_->TryAllocate(size); | 161 new_addr = scavenger_->TryAllocateGC(size); |
162 NOT_IN_PRODUCT(class_table->UpdateLiveNew(cid, size)); | 162 NOT_IN_PRODUCT(class_table->UpdateLiveNew(cid, size)); |
163 } | 163 } |
164 } | 164 } |
165 // During a scavenge we always succeed in at least copying all of the | 165 // During a scavenge we always succeed in at least copying all of the |
166 // current objects to the to space. | 166 // current objects to the to space. |
167 ASSERT(new_addr != 0); | 167 ASSERT(new_addr != 0); |
168 // Copy the object to the new location. | 168 // Copy the object to the new location. |
169 memmove(reinterpret_cast<void*>(new_addr), | 169 memmove(reinterpret_cast<void*>(new_addr), |
170 reinterpret_cast<void*>(raw_addr), size); | 170 reinterpret_cast<void*>(raw_addr), size); |
171 // Remember forwarding address. | 171 // Remember forwarding address. |
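The branch above relies on the scavenger's forwarding convention: once an object has been copied, the first word of the from-space original is overwritten with a tagged pointer to the copy, which is what IsForwarding and ForwardedAddr test and decode. A minimal sketch of that scheme; the tag values and helper bodies are assumptions for illustration, not the VM's exact code:

    #include <cstdint>

    using uword = uintptr_t;

    // Two low header bits double as a forwarding tag (illustrative values;
    // object alignment guarantees these bits are otherwise unused).
    const uword kForwardingMask = 3;
    const uword kForwarded = 3;

    inline bool IsForwarding(uword header) {
      return (header & kForwardingMask) == kForwarded;
    }

    inline uword ForwardedAddr(uword header) {
      return header & ~kForwardingMask;  // Recover the to-space address.
    }

    inline uword ForwardingHeader(uword new_addr) {
      return new_addr | kForwarded;  // Written over the old object's header.
    }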
(...skipping 161 matching lines...)
333 intptr_t max_semi_capacity_in_words, | 333 intptr_t max_semi_capacity_in_words, |
334 uword object_alignment) | 334 uword object_alignment) |
335 : heap_(heap), | 335 : heap_(heap), |
336 max_semi_capacity_in_words_(max_semi_capacity_in_words), | 336 max_semi_capacity_in_words_(max_semi_capacity_in_words), |
337 object_alignment_(object_alignment), | 337 object_alignment_(object_alignment), |
338 scavenging_(false), | 338 scavenging_(false), |
339 delayed_weak_properties_(NULL), | 339 delayed_weak_properties_(NULL), |
340 gc_time_micros_(0), | 340 gc_time_micros_(0), |
341 collections_(0), | 341 collections_(0), |
342 external_size_(0), | 342 external_size_(0), |
343 failed_to_promote_(false) { | 343 failed_to_promote_(false), |
344 space_lock_(new Mutex()) { | |
344 // Verify assumptions about the first word in objects which the scavenger is | 345 // Verify assumptions about the first word in objects which the scavenger is |
345 // going to use for forwarding pointers. | 346 // going to use for forwarding pointers. |
346 ASSERT(Object::tags_offset() == 0); | 347 ASSERT(Object::tags_offset() == 0); |
347 | 348 |
348 // Set initial size resulting in a total of three different levels. | 349 // Set initial size resulting in a total of three different levels. |
349 const intptr_t initial_semi_capacity_in_words = | 350 const intptr_t initial_semi_capacity_in_words = |
350 max_semi_capacity_in_words / | 351 max_semi_capacity_in_words / |
351 (FLAG_new_gen_growth_factor * FLAG_new_gen_growth_factor); | 352 (FLAG_new_gen_growth_factor * FLAG_new_gen_growth_factor); |
352 | 353 |
353 const intptr_t kVmNameSize = 128; | 354 const intptr_t kVmNameSize = 128; |
(...skipping 11 matching lines...)
365 survivor_end_ = FirstObjectStart(); | 366 survivor_end_ = FirstObjectStart(); |
366 | 367 |
367 UpdateMaxHeapCapacity(); | 368 UpdateMaxHeapCapacity(); |
368 UpdateMaxHeapUsage(); | 369 UpdateMaxHeapUsage(); |
369 } | 370 } |
370 | 371 |
371 | 372 |
372 Scavenger::~Scavenger() { | 373 Scavenger::~Scavenger() { |
373 ASSERT(!scavenging_); | 374 ASSERT(!scavenging_); |
374 to_->Delete(); | 375 to_->Delete(); |
376 delete space_lock_; | |
375 } | 377 } |
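The constructor now creates space_lock_, the destructor deletes it, and the visitor's TryAllocate calls were renamed to TryAllocateGC, which suggests GC-time copying takes the lock while the mutator keeps a lock-free thread-local bump pointer. A sketch of what the locked variant might look like under that assumption (its body is not part of this hunk):

    uword Scavenger::TryAllocateGC(intptr_t size) {
      // Serialize GC-time bump allocation in to-space; mutator allocation
      // goes through the thread-local top/end window instead.
      MutexLocker ml(space_lock_);
      uword result = top_;
      if ((end_ - top_) < static_cast<uword>(size)) {
        return 0;  // to-space exhausted.
      }
      top_ = result + size;
      return result;
    }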
376 | 378 |
377 | 379 |
378 intptr_t Scavenger::NewSizeInWords(intptr_t old_size_in_words) const { | 380 intptr_t Scavenger::NewSizeInWords(intptr_t old_size_in_words) const { |
379 if (stats_history_.Size() == 0) { | 381 if (stats_history_.Size() == 0) { |
380 return old_size_in_words; | 382 return old_size_in_words; |
381 } | 383 } |
382 double garbage = stats_history_.Get(0).GarbageFraction(); | 384 double garbage = stats_history_.Get(0).GarbageFraction(); |
383 if (garbage < (FLAG_new_gen_garbage_threshold / 100.0)) { | 385 if (garbage < (FLAG_new_gen_garbage_threshold / 100.0)) { |
384 return Utils::Minimum(max_semi_capacity_in_words_, | 386 return Utils::Minimum(max_semi_capacity_in_words_, |
385 old_size_in_words * FLAG_new_gen_growth_factor); | 387 old_size_in_words * FLAG_new_gen_growth_factor); |
386 } else { | 388 } else { |
387 return old_size_in_words; | 389 return old_size_in_words; |
388 } | 390 } |
389 } | 391 } |
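Concretely, with flag values assumed only for illustration: if FLAG_new_gen_growth_factor is 4 and FLAG_new_gen_garbage_threshold is 40, a scavenge whose garbage fraction was 25% grows the semispace fourfold (clamped to max_semi_capacity_in_words_), while one that recovered 60% garbage keeps the current size, since a high garbage fraction means the space is already large enough.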
390 | 392 |
391 | 393 |
392 SemiSpace* Scavenger::Prologue(Isolate* isolate, bool invoke_api_callbacks) { | 394 SemiSpace* Scavenger::Prologue(Isolate* isolate, bool invoke_api_callbacks) { |
393 if (invoke_api_callbacks && (isolate->gc_prologue_callback() != NULL)) { | 395 if (invoke_api_callbacks && (isolate->gc_prologue_callback() != NULL)) { |
394 (isolate->gc_prologue_callback())(); | 396 (isolate->gc_prologue_callback())(); |
395 } | 397 } |
396 isolate->PrepareForGC(); | 398 isolate->PrepareForGC(); |
399 | |
400 Thread* thread = Thread::Current(); | |
rmacnak 2017/07/12 17:10:22: Why not always use `isolate->mutator_thread()`?
danunez 2017/07/12 18:23:38: Looks like I missed one.
401 if (!thread->IsMutatorThread()) { | |
402 thread = isolate->mutator_thread(); | |
403 } | |
404 | |
397 // Flip the two semi-spaces so that to_ is always the space for allocating | 405 // Flip the two semi-spaces so that to_ is always the space for allocating |
398 // objects. | 406 // objects. |
399 SemiSpace* from = to_; | 407 SemiSpace* from = to_; |
400 | 408 |
401 const intptr_t kVmNameSize = 128; | 409 const intptr_t kVmNameSize = 128; |
402 char vm_name[kVmNameSize]; | 410 char vm_name[kVmNameSize]; |
403 Heap::RegionName(heap_, Heap::kNew, vm_name, kVmNameSize); | 411 Heap::RegionName(heap_, Heap::kNew, vm_name, kVmNameSize); |
404 to_ = SemiSpace::New(NewSizeInWords(from->size_in_words()), vm_name); | 412 to_ = SemiSpace::New(NewSizeInWords(from->size_in_words()), vm_name); |
405 if (to_ == NULL) { | 413 if (to_ == NULL) { |
406 // TODO(koda): We could try to recover (collect old space, wait for another | 414 // TODO(koda): We could try to recover (collect old space, wait for another |
407 // isolate to finish scavenge, etc.). | 415 // isolate to finish scavenge, etc.). |
408 OUT_OF_MEMORY(); | 416 OUT_OF_MEMORY(); |
409 } | 417 } |
410 UpdateMaxHeapCapacity(); | 418 UpdateMaxHeapCapacity(); |
411 top_ = FirstObjectStart(); | 419 top_ = FirstObjectStart(); |
412 resolved_top_ = top_; | 420 resolved_top_ = top_; |
413 end_ = to_->end(); | 421 end_ = to_->end(); |
422 | |
423 if (thread->heap() == heap_) { | |
rmacnak 2017/07/12 17:10:22: We should never scavenge another isolate's heap. (
danunez 2017/07/12 18:23:38: As discussed, will replace with a check if the mut
424 thread->set_top(top_); | |
425 thread->set_end(end_); | |
426 } | |
427 | |
414 return from; | 428 return from; |
415 } | 429 } |
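Re-seeding thread->top() and thread->end() after the semispace flip matters because, with this change, the mutator presumably bump-allocates directly out of its thread-local window between safepoints. A sketch of such a fast path, written as an assumption rather than the VM's actual allocator:

    uword TryAllocateFastPath(Thread* thread, intptr_t size) {
      uword top = thread->top();
      if (static_cast<uword>(size) > (thread->end() - top)) {
        return 0;  // TLS window exhausted: take the slow (runtime) path.
      }
      thread->set_top(top + size);  // Pure bump allocation, no lock.
      return top;
    }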
416 | 430 |
417 | 431 |
418 void Scavenger::Epilogue(Isolate* isolate, | 432 void Scavenger::Epilogue(Isolate* isolate, |
419 SemiSpace* from, | 433 SemiSpace* from, |
420 bool invoke_api_callbacks) { | 434 bool invoke_api_callbacks) { |
421 // All objects in the to space have been copied from the from space at this | 435 // All objects in the to space have been copied from the from space at this |
422 // moment. | 436 // moment. |
437 | |
438 Thread* thread = Thread::Current(); | |
439 | |
440 if (!thread->IsMutatorThread()) { | |
441 thread = isolate->mutator_thread(); | |
442 } | |
443 | |
444 if (thread->heap() == heap_) { | |
rmacnak 2017/07/12 17:10:22: We should never scavenge another isolate's heap. (
danunez 2017/07/12 18:23:38: Done.
445 thread->set_top(top_); | |
446 thread->set_end(end_); | |
447 } | |
448 | |
423 double avg_frac = stats_history_.Get(0).PromoCandidatesSuccessFraction(); | 449 double avg_frac = stats_history_.Get(0).PromoCandidatesSuccessFraction(); |
424 if (stats_history_.Size() >= 2) { | 450 if (stats_history_.Size() >= 2) { |
425 // Previous scavenge is only given half as much weight. | 451 // Previous scavenge is only given half as much weight. |
426 avg_frac += 0.5 * stats_history_.Get(1).PromoCandidatesSuccessFraction(); | 452 avg_frac += 0.5 * stats_history_.Get(1).PromoCandidatesSuccessFraction(); |
427 avg_frac /= 1.0 + 0.5; // Normalize. | 453 avg_frac /= 1.0 + 0.5; // Normalize. |
428 } | 454 } |
429 if (avg_frac < (FLAG_early_tenuring_threshold / 100.0)) { | 455 if (avg_frac < (FLAG_early_tenuring_threshold / 100.0)) { |
430 // Remember the limit to which objects have been copied. | 456 // Remember the limit to which objects have been copied. |
431 survivor_end_ = top_; | 457 survivor_end_ = top_; |
432 } else { | 458 } else { |
(...skipping 116 matching lines...)
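The weighting above is avg = (f0 + 0.5 * f1) / 1.5: the latest promotion-success fraction counts twice as much as the previous one. For example, f0 = 0.2 and f1 = 0.8 give (0.2 + 0.4) / 1.5 = 0.4. When the result stays below FLAG_early_tenuring_threshold percent, survivor_end_ is set to top_, so exactly the objects that survived this scavenge become promotion candidates in the next one.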
549 } | 575 } |
550 | 576 |
551 | 577 |
552 void Scavenger::IterateWeakRoots(Isolate* isolate, HandleVisitor* visitor) { | 578 void Scavenger::IterateWeakRoots(Isolate* isolate, HandleVisitor* visitor) { |
553 isolate->VisitWeakPersistentHandles(visitor); | 579 isolate->VisitWeakPersistentHandles(visitor); |
554 } | 580 } |
555 | 581 |
556 | 582 |
557 void Scavenger::ProcessToSpace(ScavengerVisitor* visitor) { | 583 void Scavenger::ProcessToSpace(ScavengerVisitor* visitor) { |
558 // Iterate until all work has been drained. | 584 // Iterate until all work has been drained. |
585 | |
rmacnak 2017/07/12 17:10:22: Spurious whitespace changes.
danunez 2017/07/12 18:23:38: Done.
559 while ((resolved_top_ < top_) || PromotedStackHasMore()) { | 586 while ((resolved_top_ < top_) || PromotedStackHasMore()) { |
560 while (resolved_top_ < top_) { | 587 while (resolved_top_ < top_) { |
561 RawObject* raw_obj = RawObject::FromAddr(resolved_top_); | 588 RawObject* raw_obj = RawObject::FromAddr(resolved_top_); |
562 intptr_t class_id = raw_obj->GetClassId(); | 589 intptr_t class_id = raw_obj->GetClassId(); |
563 if (class_id != kWeakPropertyCid) { | 590 if (class_id != kWeakPropertyCid) { |
564 resolved_top_ += raw_obj->VisitPointersNonvirtual(visitor); | 591 resolved_top_ += raw_obj->VisitPointersNonvirtual(visitor); |
565 } else { | 592 } else { |
566 RawWeakProperty* raw_weak = reinterpret_cast<RawWeakProperty*>(raw_obj); | 593 RawWeakProperty* raw_weak = reinterpret_cast<RawWeakProperty*>(raw_obj); |
567 resolved_top_ += ProcessWeakProperty(raw_weak, visitor); | 594 resolved_top_ += ProcessWeakProperty(raw_weak, visitor); |
568 } | 595 } |
(...skipping 32 matching lines...)
601 uword raw_addr = RawObject::ToAddr(raw_key); | 628 uword raw_addr = RawObject::ToAddr(raw_key); |
602 ASSERT(visitor->from_->Contains(raw_addr)); | 629 ASSERT(visitor->from_->Contains(raw_addr)); |
603 uword header = *reinterpret_cast<uword*>(raw_addr); | 630 uword header = *reinterpret_cast<uword*>(raw_addr); |
604 // Reset the next pointer in the weak property. | 631 // Reset the next pointer in the weak property. |
605 cur_weak->ptr()->next_ = 0; | 632 cur_weak->ptr()->next_ = 0; |
606 if (IsForwarding(header)) { | 633 if (IsForwarding(header)) { |
607 cur_weak->VisitPointersNonvirtual(visitor); | 634 cur_weak->VisitPointersNonvirtual(visitor); |
608 } else { | 635 } else { |
609 EnqueueWeakProperty(cur_weak); | 636 EnqueueWeakProperty(cur_weak); |
610 } | 637 } |
638 | |
611 // Advance to next weak property in the queue. | 639 // Advance to next weak property in the queue. |
612 cur_weak = reinterpret_cast<RawWeakProperty*>(next_weak); | 640 cur_weak = reinterpret_cast<RawWeakProperty*>(next_weak); |
613 } | 641 } |
614 } | 642 } |
615 } | 643 } |
616 } | 644 } |
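ProcessToSpace above is a Cheney-style scan interleaved with a promoted-object worklist: visiting an object can copy its children into to-space (advancing top_) or promote them (pushing the promoted stack), so both inner loops repeat until neither makes progress; weak properties take a detour through the delayed list until their keys are known to be reachable. A toy, self-contained model of that control flow, with stand-in types rather than the VM's:

    #include <cstddef>
    #include <vector>

    struct Obj { bool visited = false; };

    void Drain(std::vector<Obj>& to_space, size_t& resolved,
               std::vector<Obj*>& promoted) {
      while (resolved < to_space.size() || !promoted.empty()) {
        // Linear scan of freshly copied objects; visiting one may append
        // more copies to to_space, extending the scan target.
        while (resolved < to_space.size()) {
          to_space[resolved++].visited = true;
        }
        // Drain promoted objects; visiting them may likewise copy children
        // into to_space, re-entering the outer loop.
        while (!promoted.empty()) {
          promoted.back()->visited = true;
          promoted.pop_back();
        }
      }
    }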
617 | 645 |
618 | 646 |
619 void Scavenger::UpdateMaxHeapCapacity() { | 647 void Scavenger::UpdateMaxHeapCapacity() { |
620 if (heap_ == NULL) { | 648 if (heap_ == NULL) { |
(...skipping 104 matching lines...)
725 | 753 |
726 WeakProperty::Clear(cur_weak); | 754 WeakProperty::Clear(cur_weak); |
727 | 755 |
728 // Advance to next weak property in the queue. | 756 // Advance to next weak property in the queue. |
729 cur_weak = reinterpret_cast<RawWeakProperty*>(next_weak); | 757 cur_weak = reinterpret_cast<RawWeakProperty*>(next_weak); |
730 } | 758 } |
731 } | 759 } |
732 } | 760 } |
733 | 761 |
734 | 762 |
763 void Scavenger::FlushTLS() const { | |
764 if (heap_ != NULL) { | |
765 Thread* mutator_thread = heap_->isolate()->mutator_thread(); | |
766 | |
767 if (mutator_thread->heap() == heap_) { | |
768 mutator_thread->heap()->new_space()->set_top(mutator_thread->top()); | |
769 } | |
770 } | |
771 } | |
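FlushTLS appears to exist because, between safepoints, the authoritative new-space allocation pointer now lives in the mutator thread's TLS rather than in top_: publishing it back before iterating keeps VisitObjectPointers, VisitObjects, and FindObject from stopping short of the most recently allocated objects. Only top needs syncing, since end presumably does not move between scavenges.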
772 | |
735 void Scavenger::VisitObjectPointers(ObjectPointerVisitor* visitor) const { | 773 void Scavenger::VisitObjectPointers(ObjectPointerVisitor* visitor) const { |
774 FlushTLS(); | |
736 uword cur = FirstObjectStart(); | 775 uword cur = FirstObjectStart(); |
737 while (cur < top_) { | 776 while (cur < top_) { |
738 RawObject* raw_obj = RawObject::FromAddr(cur); | 777 RawObject* raw_obj = RawObject::FromAddr(cur); |
739 cur += raw_obj->VisitPointers(visitor); | 778 cur += raw_obj->VisitPointers(visitor); |
740 } | 779 } |
741 } | 780 } |
742 | 781 |
743 | 782 |
744 void Scavenger::VisitObjects(ObjectVisitor* visitor) const { | 783 void Scavenger::VisitObjects(ObjectVisitor* visitor) const { |
784 FlushTLS(); | |
745 uword cur = FirstObjectStart(); | 785 uword cur = FirstObjectStart(); |
746 while (cur < top_) { | 786 while (cur < top_) { |
747 RawObject* raw_obj = RawObject::FromAddr(cur); | 787 RawObject* raw_obj = RawObject::FromAddr(cur); |
748 visitor->VisitObject(raw_obj); | 788 visitor->VisitObject(raw_obj); |
749 cur += raw_obj->Size(); | 789 cur += raw_obj->Size(); |
750 } | 790 } |
751 } | 791 } |
752 | 792 |
753 | 793 |
754 void Scavenger::AddRegionsToObjectSet(ObjectSet* set) const { | 794 void Scavenger::AddRegionsToObjectSet(ObjectSet* set) const { |
755 set->AddRegion(to_->start(), to_->end()); | 795 set->AddRegion(to_->start(), to_->end()); |
756 } | 796 } |
757 | 797 |
758 | 798 |
759 RawObject* Scavenger::FindObject(FindObjectVisitor* visitor) const { | 799 RawObject* Scavenger::FindObject(FindObjectVisitor* visitor) const { |
760 ASSERT(!scavenging_); | 800 ASSERT(!scavenging_); |
801 FlushTLS(); | |
761 uword cur = FirstObjectStart(); | 802 uword cur = FirstObjectStart(); |
803 | |
762 if (visitor->VisitRange(cur, top_)) { | 804 if (visitor->VisitRange(cur, top_)) { |
763 while (cur < top_) { | 805 while (cur < top_) { |
764 RawObject* raw_obj = RawObject::FromAddr(cur); | 806 RawObject* raw_obj = RawObject::FromAddr(cur); |
765 uword next = cur + raw_obj->Size(); | 807 uword next = cur + raw_obj->Size(); |
766 if (visitor->VisitRange(cur, next) && raw_obj->FindObject(visitor)) { | 808 if (visitor->VisitRange(cur, next) && raw_obj->FindObject(visitor)) { |
767 return raw_obj; // Found object, return it. | 809 return raw_obj; // Found object, return it. |
768 } | 810 } |
769 cur = next; | 811 cur = next; |
770 } | 812 } |
771 ASSERT(cur == top_); | |
772 } | 813 } |
773 return Object::null(); | 814 return Object::null(); |
774 } | 815 } |
775 | 816 |
776 | 817 |
777 void Scavenger::Scavenge() { | 818 void Scavenger::Scavenge() { |
778 // TODO(cshapiro): Add a decision procedure for determining when the | 819 // TODO(cshapiro): Add a decision procedure for determining when the |
779 // API callbacks should be invoked. | 820 // API callbacks should be invoked. |
780 Scavenge(false); | 821 Scavenge(false); |
781 } | 822 } |
(...skipping 121 matching lines...)
903 } | 944 } |
904 | 945 |
905 | 946 |
906 void Scavenger::FreeExternal(intptr_t size) { | 947 void Scavenger::FreeExternal(intptr_t size) { |
907 ASSERT(size >= 0); | 948 ASSERT(size >= 0); |
908 external_size_ -= size; | 949 external_size_ -= size; |
909 ASSERT(external_size_ >= 0); | 950 ASSERT(external_size_ >= 0); |
910 } | 951 } |
911 | 952 |
912 | 953 |
913 void Scavenger::Evacuate() { | 954 void Scavenger::Evacuate(Thread* thread) { |
rmacnak 2017/07/12 17:10:22: It looks like all callers pass isolate()->mutator_
danunez 2017/07/12 18:23:38: You are right. I will modify this function to grab
914 // We need a safepoint here to prevent allocation right before or right after | 955 // We need a safepoint here to prevent allocation right before or right after |
915 // the scavenge. | 956 // the scavenge. |
916 // The former can introduce an object that we might fail to collect. | 957 // The former can introduce an object that we might fail to collect. |
917 // The latter means even if the scavenge promotes every object in the new | 958 // The latter means even if the scavenge promotes every object in the new |
918 // space, the new allocation means the space is not empty, | 959 // space, the new allocation means the space is not empty, |
919 // causing the assertion below to fail. | 960 // causing the assertion below to fail. |
920 SafepointOperationScope scope(Thread::Current()); | 961 SafepointOperationScope scope(Thread::Current()); |
921 | 962 |
922 // Forces the next scavenge to promote all the objects in the new space. | 963 // Forces the next scavenge to promote all the objects in the new space. |
923 survivor_end_ = top_; | 964 survivor_end_ = top_; |
965 | |
966 if (thread->heap() != NULL) { | |
rmacnak 2017/07/12 17:10:22: // Null in some unit tests. (Presumably)
danunez 2017/07/12 18:23:38: As stated above, will replace with a function that
967 survivor_end_ = thread->top(); | |
968 } | |
969 | |
924 Scavenge(); | 970 Scavenge(); |
925 | 971 |
926 // It is possible for objects to stay in the new space | 972 // It is possible for objects to stay in the new space |
927 // if the VM cannot create more pages for these objects. | 973 // if the VM cannot create more pages for these objects. |
928 ASSERT((UsedInWords() == 0) || failed_to_promote_); | 974 ASSERT((UsedInWords() == 0) || failed_to_promote_); |
929 } | 975 } |
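Reading survivor_end_ from thread->top() (when the thread has a heap; the review notes it can be NULL in some unit tests) is consistent with the TLS scheme: the mutator may have bump-allocated past the last value synced into top_, and everything up to the thread-local top must be treated as a promotion candidate for the evacuating scavenge to empty the space.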
930 | 976 |
931 } // namespace dart | 977 } // namespace dart |