| OLD | NEW |
| 1 // Copyright (c) 2011, the Dart project authors. Please see the AUTHORS file | 1 // Copyright (c) 2011, the Dart project authors. Please see the AUTHORS file |
| 2 // for details. All rights reserved. Use of this source code is governed by a | 2 // for details. All rights reserved. Use of this source code is governed by a |
| 3 // BSD-style license that can be found in the LICENSE file. | 3 // BSD-style license that can be found in the LICENSE file. |
| 4 | 4 |
| 5 #include "vm/scavenger.h" | 5 #include "vm/scavenger.h" |
| 6 | 6 |
| 7 #include <algorithm> | 7 #include <algorithm> |
| 8 #include <map> | 8 #include <map> |
| 9 #include <utility> | 9 #include <utility> |
| 10 | 10 |
| (...skipping 60 matching lines...) |
| 71 } | 71 } |
| 72 | 72 |
| 73 private: | 73 private: |
| 74 bool* _addr; | 74 bool* _addr; |
| 75 bool _value; | 75 bool _value; |
| 76 }; | 76 }; |
| 77 | 77 |
| 78 | 78 |
| 79 class ScavengerVisitor : public ObjectPointerVisitor { | 79 class ScavengerVisitor : public ObjectPointerVisitor { |
| 80 public: | 80 public: |
| 81 explicit ScavengerVisitor(Isolate* isolate, Scavenger* scavenger) | 81 explicit ScavengerVisitor(Isolate* isolate, |
| 82 Scavenger* scavenger, |
| 83 SemiSpace* from) |
| 82 : ObjectPointerVisitor(isolate), | 84 : ObjectPointerVisitor(isolate), |
| 83 thread_(Thread::Current()), | 85 thread_(Thread::Current()), |
| 84 scavenger_(scavenger), | 86 scavenger_(scavenger), |
| 85 from_start_(scavenger_->from_->start()), | 87 from_(from), |
| 86 from_size_(scavenger_->from_->end() - scavenger_->from_->start()), | |
| 87 heap_(scavenger->heap_), | 88 heap_(scavenger->heap_), |
| 88 vm_heap_(Dart::vm_isolate()->heap()), | 89 vm_heap_(Dart::vm_isolate()->heap()), |
| 89 page_space_(scavenger->heap_->old_space()), | 90 page_space_(scavenger->heap_->old_space()), |
| 90 delayed_weak_stack_(), | 91 delayed_weak_stack_(), |
| 91 bytes_promoted_(0), | 92 bytes_promoted_(0), |
| 92 visiting_old_object_(NULL), | 93 visiting_old_object_(NULL), |
| 93 in_scavenge_pointer_(false) { } | 94 in_scavenge_pointer_(false) { } |
| 94 | 95 |
| 95 void VisitPointers(RawObject** first, RawObject** last) { | 96 void VisitPointers(RawObject** first, RawObject** last) { |
| 96 for (RawObject** current = first; current <= last; current++) { | 97 for (RawObject** current = first; current <= last; current++) { |
| (...skipping 52 matching lines...) |
| 149 ASSERT(!in_scavenge_pointer_); | 150 ASSERT(!in_scavenge_pointer_); |
| 150 BoolScope bs(&in_scavenge_pointer_, true); | 151 BoolScope bs(&in_scavenge_pointer_, true); |
| 151 #endif | 152 #endif |
| 152 | 153 |
| 153 RawObject* raw_obj = *p; | 154 RawObject* raw_obj = *p; |
| 154 | 155 |
| 155 if (raw_obj->IsSmiOrOldObject()) { | 156 if (raw_obj->IsSmiOrOldObject()) { |
| 156 return; | 157 return; |
| 157 } | 158 } |
| 158 | 159 |
| 159 // The scavenger is only interested in objects located in the from space. | |
| 160 // | |
| 161 // We are using address math here and relying on the unsigned underflow | |
| 162 // in the code below to avoid having two checks. | |
| 163 uword obj_offset = reinterpret_cast<uword>(raw_obj) - from_start_; | |
| 164 if (obj_offset > from_size_) { | |
| 165 ASSERT(scavenger_->to_->Contains(RawObject::ToAddr(raw_obj))); | |
| 166 return; | |
| 167 } | |
| 168 | |
| 169 uword raw_addr = RawObject::ToAddr(raw_obj); | 160 uword raw_addr = RawObject::ToAddr(raw_obj); |
| 161 // The scavenger is only interested in objects located in the from space. |
| 162 ASSERT(from_->Contains(raw_addr)); |
| 170 // Read the header word of the object and determine if the object has | 163 // Read the header word of the object and determine if the object has |
| 171 // already been copied. | 164 // already been copied. |
| 172 uword header = *reinterpret_cast<uword*>(raw_addr); | 165 uword header = *reinterpret_cast<uword*>(raw_addr); |
| 173 uword new_addr = 0; | 166 uword new_addr = 0; |
| 174 if (IsForwarding(header)) { | 167 if (IsForwarding(header)) { |
| 175 // Get the new location of the object. | 168 // Get the new location of the object. |
| 176 new_addr = ForwardedAddr(header); | 169 new_addr = ForwardedAddr(header); |
| 177 } else { | 170 } else { |
| 178 if (raw_obj->IsWatched()) { | 171 if (raw_obj->IsWatched()) { |
| 179 raw_obj->ClearWatchedBitUnsynchronized(); | 172 raw_obj->ClearWatchedBitUnsynchronized(); |
| (...skipping 55 matching lines...) |
| 235 *p = new_obj; | 228 *p = new_obj; |
| 236 // Update the store buffer as needed. | 229 // Update the store buffer as needed. |
| 237 if (visiting_old_object_ != NULL) { | 230 if (visiting_old_object_ != NULL) { |
| 238 VerifiedMemory::Accept(reinterpret_cast<uword>(p), sizeof(*p)); | 231 VerifiedMemory::Accept(reinterpret_cast<uword>(p), sizeof(*p)); |
| 239 UpdateStoreBuffer(p, new_obj); | 232 UpdateStoreBuffer(p, new_obj); |
| 240 } | 233 } |
| 241 } | 234 } |
| 242 | 235 |
| 243 Thread* thread_; | 236 Thread* thread_; |
| 244 Scavenger* scavenger_; | 237 Scavenger* scavenger_; |
| 245 uword from_start_; | 238 SemiSpace* from_; |
| 246 uword from_size_; | |
| 247 Heap* heap_; | 239 Heap* heap_; |
| 248 Heap* vm_heap_; | 240 Heap* vm_heap_; |
| 249 PageSpace* page_space_; | 241 PageSpace* page_space_; |
| 250 typedef std::multimap<RawObject*, RawWeakProperty*> DelaySet; | 242 typedef std::multimap<RawObject*, RawWeakProperty*> DelaySet; |
| 251 DelaySet delay_set_; | 243 DelaySet delay_set_; |
| 252 GrowableArray<RawObject*> delayed_weak_stack_; | 244 GrowableArray<RawObject*> delayed_weak_stack_; |
| 253 // TODO(cshapiro): use this value to compute survival statistics for | 245 // TODO(cshapiro): use this value to compute survival statistics for |
| 254 // new space growth policy. | 246 // new space growth policy. |
| 255 intptr_t bytes_promoted_; | 247 intptr_t bytes_promoted_; |
| 256 RawObject* visiting_old_object_; | 248 RawObject* visiting_old_object_; |
| (...skipping 158 matching lines...) |
| 415 // going to use for forwarding pointers. | 407 // going to use for forwarding pointers. |
| 416 ASSERT(Object::tags_offset() == 0); | 408 ASSERT(Object::tags_offset() == 0); |
| 417 | 409 |
| 418 // Set initial size resulting in a total of three different levels. | 410 // Set initial size resulting in a total of three different levels. |
| 419 const intptr_t initial_semi_capacity_in_words = max_semi_capacity_in_words / | 411 const intptr_t initial_semi_capacity_in_words = max_semi_capacity_in_words / |
| 420 (FLAG_new_gen_growth_factor * FLAG_new_gen_growth_factor); | 412 (FLAG_new_gen_growth_factor * FLAG_new_gen_growth_factor); |
| 421 to_ = SemiSpace::New(initial_semi_capacity_in_words); | 413 to_ = SemiSpace::New(initial_semi_capacity_in_words); |
| 422 if (to_ == NULL) { | 414 if (to_ == NULL) { |
| 423 FATAL("Out of memory.\n"); | 415 FATAL("Out of memory.\n"); |
| 424 } | 416 } |
| 425 from_ = NULL; | |
| 426 | 417 |
| 427 // Setup local fields. | 418 // Setup local fields. |
| 428 top_ = FirstObjectStart(); | 419 top_ = FirstObjectStart(); |
| 429 resolved_top_ = top_; | 420 resolved_top_ = top_; |
| 430 end_ = to_->end(); | 421 end_ = to_->end(); |
| 431 | 422 |
| 432 survivor_end_ = FirstObjectStart(); | 423 survivor_end_ = FirstObjectStart(); |
| 433 } | 424 } |
| 434 | 425 |
| 435 | 426 |
| 436 Scavenger::~Scavenger() { | 427 Scavenger::~Scavenger() { |
| 437 ASSERT(!scavenging_); | 428 ASSERT(!scavenging_); |
| 438 ASSERT(from_ == NULL); | |
| 439 to_->Delete(); | 429 to_->Delete(); |
| 440 } | 430 } |
| 441 | 431 |
| 442 | 432 |
| 443 intptr_t Scavenger::NewSizeInWords(intptr_t old_size_in_words) const { | 433 intptr_t Scavenger::NewSizeInWords(intptr_t old_size_in_words) const { |
| 444 if (stats_history_.Size() == 0) { | 434 if (stats_history_.Size() == 0) { |
| 445 return old_size_in_words; | 435 return old_size_in_words; |
| 446 } | 436 } |
| 447 double garbage = stats_history_.Get(0).GarbageFraction(); | 437 double garbage = stats_history_.Get(0).GarbageFraction(); |
| 448 if (garbage < (FLAG_new_gen_garbage_threshold / 100.0)) { | 438 if (garbage < (FLAG_new_gen_garbage_threshold / 100.0)) { |
| 449 return Utils::Minimum(max_semi_capacity_in_words_, | 439 return Utils::Minimum(max_semi_capacity_in_words_, |
| 450 old_size_in_words * FLAG_new_gen_growth_factor); | 440 old_size_in_words * FLAG_new_gen_growth_factor); |
| 451 } else { | 441 } else { |
| 452 return old_size_in_words; | 442 return old_size_in_words; |
| 453 } | 443 } |
| 454 } | 444 } |
| 455 | 445 |
| 456 | 446 |
| 457 void Scavenger::Prologue(Isolate* isolate, bool invoke_api_callbacks) { | 447 SemiSpace* Scavenger::Prologue(Isolate* isolate, bool invoke_api_callbacks) { |
| 458 if (invoke_api_callbacks && (isolate->gc_prologue_callback() != NULL)) { | 448 if (invoke_api_callbacks && (isolate->gc_prologue_callback() != NULL)) { |
| 459 (isolate->gc_prologue_callback())(); | 449 (isolate->gc_prologue_callback())(); |
| 460 } | 450 } |
| 461 Thread::PrepareForGC(); | 451 Thread::PrepareForGC(); |
| 462 // Flip the two semi-spaces so that to_ is always the space for allocating | 452 // Flip the two semi-spaces so that to_ is always the space for allocating |
| 463 // objects. | 453 // objects. |
| 464 from_ = to_; | 454 SemiSpace* from = to_; |
| 465 to_ = SemiSpace::New(NewSizeInWords(from_->size_in_words())); | 455 to_ = SemiSpace::New(NewSizeInWords(from->size_in_words())); |
| 466 if (to_ == NULL) { | 456 if (to_ == NULL) { |
| 467 // TODO(koda): We could try to recover (collect old space, wait for another | 457 // TODO(koda): We could try to recover (collect old space, wait for another |
| 468 // isolate to finish scavenge, etc.). | 458 // isolate to finish scavenge, etc.). |
| 469 FATAL("Out of memory.\n"); | 459 FATAL("Out of memory.\n"); |
| 470 } | 460 } |
| 471 top_ = FirstObjectStart(); | 461 top_ = FirstObjectStart(); |
| 472 resolved_top_ = top_; | 462 resolved_top_ = top_; |
| 473 end_ = to_->end(); | 463 end_ = to_->end(); |
| 464 return from; |
| 474 } | 465 } |
| 475 | 466 |
| 476 | 467 |
| 477 void Scavenger::Epilogue(Isolate* isolate, | 468 void Scavenger::Epilogue(Isolate* isolate, |
| 469 SemiSpace* from, |
| 478 bool invoke_api_callbacks) { | 470 bool invoke_api_callbacks) { |
| 479 // All objects in the to space have been copied from the from space at this | 471 // All objects in the to space have been copied from the from space at this |
| 480 // moment. | 472 // moment. |
| 481 double avg_frac = stats_history_.Get(0).PromoCandidatesSuccessFraction(); | 473 double avg_frac = stats_history_.Get(0).PromoCandidatesSuccessFraction(); |
| 482 if (stats_history_.Size() >= 2) { | 474 if (stats_history_.Size() >= 2) { |
| 483 // Previous scavenge is only given half as much weight. | 475 // Previous scavenge is only given half as much weight. |
| 484 avg_frac += 0.5 * stats_history_.Get(1).PromoCandidatesSuccessFraction(); | 476 avg_frac += 0.5 * stats_history_.Get(1).PromoCandidatesSuccessFraction(); |
| 485 avg_frac /= 1.0 + 0.5; // Normalize. | 477 avg_frac /= 1.0 + 0.5; // Normalize. |
| 486 } | 478 } |
| 487 if (avg_frac < (FLAG_early_tenuring_threshold / 100.0)) { | 479 if (avg_frac < (FLAG_early_tenuring_threshold / 100.0)) { |
| (...skipping 11 matching lines...) |
| 499 // being spawned. | 491 // being spawned. |
| 500 { | 492 { |
| 501 PageSpace* page_space = heap_->old_space(); | 493 PageSpace* page_space = heap_->old_space(); |
| 502 MonitorLocker ml(page_space->tasks_lock()); | 494 MonitorLocker ml(page_space->tasks_lock()); |
| 503 if (page_space->tasks() == 0) { | 495 if (page_space->tasks() == 0) { |
| 504 VerifyStoreBufferPointerVisitor verify_store_buffer_visitor(isolate, to_); | 496 VerifyStoreBufferPointerVisitor verify_store_buffer_visitor(isolate, to_); |
| 505 heap_->old_space()->VisitObjectPointers(&verify_store_buffer_visitor); | 497 heap_->old_space()->VisitObjectPointers(&verify_store_buffer_visitor); |
| 506 } | 498 } |
| 507 } | 499 } |
| 508 #endif // defined(DEBUG) | 500 #endif // defined(DEBUG) |
| 509 from_->Delete(); | 501 from->Delete(); |
| 510 from_ = NULL; | |
| 511 if (invoke_api_callbacks && (isolate->gc_epilogue_callback() != NULL)) { | 502 if (invoke_api_callbacks && (isolate->gc_epilogue_callback() != NULL)) { |
| 512 (isolate->gc_epilogue_callback())(); | 503 (isolate->gc_epilogue_callback())(); |
| 513 } | 504 } |
| 514 } | 505 } |
| 515 | 506 |
| 516 | 507 |
| 517 void Scavenger::IterateStoreBuffers(Isolate* isolate, | 508 void Scavenger::IterateStoreBuffers(Isolate* isolate, |
| 518 ScavengerVisitor* visitor) { | 509 ScavengerVisitor* visitor) { |
| 519 // Iterating through the store buffers. | 510 // Iterating through the store buffers. |
| 520 // Grab the deduplication sets out of the isolate's consolidated store buffer. | 511 // Grab the deduplication sets out of the isolate's consolidated store buffer. |
| (...skipping 55 matching lines...) |
| 576 | 567 |
| 577 bool Scavenger::IsUnreachable(RawObject** p) { | 568 bool Scavenger::IsUnreachable(RawObject** p) { |
| 578 RawObject* raw_obj = *p; | 569 RawObject* raw_obj = *p; |
| 579 if (!raw_obj->IsHeapObject()) { | 570 if (!raw_obj->IsHeapObject()) { |
| 580 return false; | 571 return false; |
| 581 } | 572 } |
| 582 if (!raw_obj->IsNewObject()) { | 573 if (!raw_obj->IsNewObject()) { |
| 583 return false; | 574 return false; |
| 584 } | 575 } |
| 585 uword raw_addr = RawObject::ToAddr(raw_obj); | 576 uword raw_addr = RawObject::ToAddr(raw_obj); |
| 586 if (!from_->Contains(raw_addr)) { | 577 if (to_->Contains(raw_addr)) { |
| 587 return false; | 578 return false; |
| 588 } | 579 } |
| 589 uword header = *reinterpret_cast<uword*>(raw_addr); | 580 uword header = *reinterpret_cast<uword*>(raw_addr); |
| 590 if (IsForwarding(header)) { | 581 if (IsForwarding(header)) { |
| 591 uword new_addr = ForwardedAddr(header); | 582 uword new_addr = ForwardedAddr(header); |
| 592 *p = RawObject::FromAddr(new_addr); | 583 *p = RawObject::FromAddr(new_addr); |
| 593 return false; | 584 return false; |
| 594 } | 585 } |
| 595 return true; | 586 return true; |
| 596 } | 587 } |
| (...skipping 213 matching lines...) |
| 810 if (FLAG_verify_before_gc && !FLAG_concurrent_sweep) { | 801 if (FLAG_verify_before_gc && !FLAG_concurrent_sweep) { |
| 811 OS::PrintErr("Verifying before Scavenge..."); | 802 OS::PrintErr("Verifying before Scavenge..."); |
| 812 heap_->Verify(kForbidMarked); | 803 heap_->Verify(kForbidMarked); |
| 813 OS::PrintErr(" done.\n"); | 804 OS::PrintErr(" done.\n"); |
| 814 } | 805 } |
| 815 | 806 |
| 816 // Prepare for a scavenge. | 807 // Prepare for a scavenge. |
| 817 SpaceUsage usage_before = GetCurrentUsage(); | 808 SpaceUsage usage_before = GetCurrentUsage(); |
| 818 intptr_t promo_candidate_words = | 809 intptr_t promo_candidate_words = |
| 819 (survivor_end_ - FirstObjectStart()) / kWordSize; | 810 (survivor_end_ - FirstObjectStart()) / kWordSize; |
| 820 Prologue(isolate, invoke_api_callbacks); | 811 SemiSpace* from = Prologue(isolate, invoke_api_callbacks); |
| 821 // The API prologue/epilogue may create/destroy zones, so we must not | 812 // The API prologue/epilogue may create/destroy zones, so we must not |
| 822 // depend on zone allocations surviving beyond the epilogue callback. | 813 // depend on zone allocations surviving beyond the epilogue callback. |
| 823 { | 814 { |
| 824 StackZone zone(Thread::Current()); | 815 StackZone zone(Thread::Current()); |
| 825 // Setup the visitor and run the scavenge. | 816 // Setup the visitor and run the scavenge. |
| 826 ScavengerVisitor visitor(isolate, this); | 817 ScavengerVisitor visitor(isolate, this, from); |
| 827 page_space->AcquireDataLock(); | 818 page_space->AcquireDataLock(); |
| 828 const bool prologue_weak_are_strong = !invoke_api_callbacks; | 819 const bool prologue_weak_are_strong = !invoke_api_callbacks; |
| 829 IterateRoots(isolate, &visitor, prologue_weak_are_strong); | 820 IterateRoots(isolate, &visitor, prologue_weak_are_strong); |
| 830 int64_t start = OS::GetCurrentTimeMicros(); | 821 int64_t start = OS::GetCurrentTimeMicros(); |
| 831 ProcessToSpace(&visitor); | 822 ProcessToSpace(&visitor); |
| 832 int64_t middle = OS::GetCurrentTimeMicros(); | 823 int64_t middle = OS::GetCurrentTimeMicros(); |
| 833 IterateWeakReferences(isolate, &visitor); | 824 IterateWeakReferences(isolate, &visitor); |
| 834 ScavengerWeakVisitor weak_visitor(this, prologue_weak_are_strong); | 825 ScavengerWeakVisitor weak_visitor(this, prologue_weak_are_strong); |
| 835 // Include the prologue weak handles, since we must process any promotion. | 826 // Include the prologue weak handles, since we must process any promotion. |
| 836 const bool visit_prologue_weak_handles = true; | 827 const bool visit_prologue_weak_handles = true; |
| 837 IterateWeakRoots(isolate, &weak_visitor, visit_prologue_weak_handles); | 828 IterateWeakRoots(isolate, &weak_visitor, visit_prologue_weak_handles); |
| 838 visitor.Finalize(); | 829 visitor.Finalize(); |
| 839 ProcessWeakTables(); | 830 ProcessWeakTables(); |
| 840 page_space->ReleaseDataLock(); | 831 page_space->ReleaseDataLock(); |
| 841 | 832 |
| 842 // Scavenge finished. Run accounting. | 833 // Scavenge finished. Run accounting. |
| 843 int64_t end = OS::GetCurrentTimeMicros(); | 834 int64_t end = OS::GetCurrentTimeMicros(); |
| 844 heap_->RecordTime(kProcessToSpace, middle - start); | 835 heap_->RecordTime(kProcessToSpace, middle - start); |
| 845 heap_->RecordTime(kIterateWeaks, end - middle); | 836 heap_->RecordTime(kIterateWeaks, end - middle); |
| 846 stats_history_.Add( | 837 stats_history_.Add( |
| 847 ScavengeStats(start, end, | 838 ScavengeStats(start, end, |
| 848 usage_before, GetCurrentUsage(), | 839 usage_before, GetCurrentUsage(), |
| 849 promo_candidate_words, | 840 promo_candidate_words, |
| 850 visitor.bytes_promoted() >> kWordSizeLog2)); | 841 visitor.bytes_promoted() >> kWordSizeLog2)); |
| 851 } | 842 } |
| 852 Epilogue(isolate, invoke_api_callbacks); | 843 Epilogue(isolate, from, invoke_api_callbacks); |
| 853 | 844 |
| 854 // TODO(koda): Make verification more compatible with concurrent sweep. | 845 // TODO(koda): Make verification more compatible with concurrent sweep. |
| 855 if (FLAG_verify_after_gc && !FLAG_concurrent_sweep) { | 846 if (FLAG_verify_after_gc && !FLAG_concurrent_sweep) { |
| 856 OS::PrintErr("Verifying after Scavenge..."); | 847 OS::PrintErr("Verifying after Scavenge..."); |
| 857 heap_->Verify(kForbidMarked); | 848 heap_->Verify(kForbidMarked); |
| 858 OS::PrintErr(" done.\n"); | 849 OS::PrintErr(" done.\n"); |
| 859 } | 850 } |
| 860 | 851 |
| 861 // Done scavenging. Reset the marker. | 852 // Done scavenging. Reset the marker. |
| 862 ASSERT(scavenging_); | 853 ASSERT(scavenging_); |
| 863 scavenging_ = false; | 854 scavenging_ = false; |
| 864 } | 855 } |
| 865 | 856 |
| 866 | 857 |
| 867 void Scavenger::WriteProtect(bool read_only) { | 858 void Scavenger::WriteProtect(bool read_only) { |
| 868 ASSERT(!scavenging_); | 859 ASSERT(!scavenging_); |
| 869 ASSERT(from_ == NULL); | |
| 870 to_->WriteProtect(read_only); | 860 to_->WriteProtect(read_only); |
| 871 } | 861 } |
| 872 | 862 |
| 873 | 863 |
| 874 void Scavenger::PrintToJSONObject(JSONObject* object) const { | 864 void Scavenger::PrintToJSONObject(JSONObject* object) const { |
| 875 Isolate* isolate = Isolate::Current(); | 865 Isolate* isolate = Isolate::Current(); |
| 876 ASSERT(isolate != NULL); | 866 ASSERT(isolate != NULL); |
| 877 JSONObject space(object, "new"); | 867 JSONObject space(object, "new"); |
| 878 space.AddProperty("type", "HeapSpace"); | 868 space.AddProperty("type", "HeapSpace"); |
| 879 space.AddProperty("name", "new"); | 869 space.AddProperty("name", "new"); |
| (...skipping 23 matching lines...) |
| 903 } | 893 } |
| 904 | 894 |
| 905 | 895 |
| 906 void Scavenger::FreeExternal(intptr_t size) { | 896 void Scavenger::FreeExternal(intptr_t size) { |
| 907 ASSERT(size >= 0); | 897 ASSERT(size >= 0); |
| 908 external_size_ -= size; | 898 external_size_ -= size; |
| 909 ASSERT(external_size_ >= 0); | 899 ASSERT(external_size_ >= 0); |
| 910 } | 900 } |
| 911 | 901 |
| 912 } // namespace dart | 902 } // namespace dart |
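For readers skimming the diff, the net effect of the change is that `from_` stops being a `Scavenger` member: `Prologue()` now returns the flipped from-space, `ScavengerVisitor` and `Epilogue()` receive it as a parameter, and it is deleted before the epilogue finishes. The sketch below is a minimal, self-contained illustration of that ownership pattern only; the class shapes and signatures are simplified stand-ins, not the actual Dart VM declarations.

```cpp
// scavenger_sketch.cc -- a hedged sketch (not the Dart VM sources) of the
// ownership change in this CL: the from-space is no longer a Scavenger
// member; Prologue() returns it, the visitor and Epilogue() take it as a
// parameter, and it is deleted at the end of each scavenge.
#include <cstdint>
#include <cstdio>

// Stand-in for the VM's SemiSpace; only what the sketch needs.
class SemiSpace {
 public:
  static SemiSpace* New(intptr_t size_in_words) {
    return new SemiSpace(size_in_words);
  }
  void Delete() { delete this; }
  intptr_t size_in_words() const { return size_in_words_; }

 private:
  explicit SemiSpace(intptr_t size_in_words) : size_in_words_(size_in_words) {}
  intptr_t size_in_words_;
};

// The visitor receives the from-space explicitly instead of reading a
// scavenger->from_ member.
class ScavengerVisitor {
 public:
  explicit ScavengerVisitor(SemiSpace* from) : from_(from) {}
  SemiSpace* from() const { return from_; }

 private:
  SemiSpace* from_;
};

class Scavenger {
 public:
  explicit Scavenger(intptr_t initial_words)
      : to_(SemiSpace::New(initial_words)) {}
  ~Scavenger() { to_->Delete(); }

  void Scavenge() {
    SemiSpace* from = Prologue();    // flip; the caller now owns the old to_
    ScavengerVisitor visitor(from);  // visitor sees from only via parameter
    std::printf("scavenging from a %ld-word semi-space\n",
                static_cast<long>(visitor.from()->size_in_words()));
    // ... roots would be visited and survivors copied/promoted here ...
    Epilogue(from);                  // from is deleted; no dangling member
  }

 private:
  SemiSpace* Prologue() {
    SemiSpace* from = to_;                        // old to-space becomes from
    to_ = SemiSpace::New(from->size_in_words());  // fresh allocation space
    return from;
  }
  void Epilogue(SemiSpace* from) {
    from->Delete();  // from-space only lives for the duration of the scavenge
  }

  SemiSpace* to_;  // the only semi-space retained between scavenges
};

int main() {
  Scavenger scavenger(1024);
  scavenger.Scavenge();
  return 0;
}
```

Scoping the from-space to a single scavenge (rather than keeping a `from_` member that is `NULL` between collections) removes the `from_ == NULL` invariants asserted in the old destructor and `WriteProtect()`, which is why those assertions disappear in this diff.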