OLD | NEW |
---|---|
1 // Copyright 2010 the V8 project authors. All rights reserved. | 1 // Copyright 2011 the V8 project authors. All rights reserved. |
2 // Redistribution and use in source and binary forms, with or without | 2 // Redistribution and use in source and binary forms, with or without |
3 // modification, are permitted provided that the following conditions are | 3 // modification, are permitted provided that the following conditions are |
4 // met: | 4 // met: |
5 // | 5 // |
6 // * Redistributions of source code must retain the above copyright | 6 // * Redistributions of source code must retain the above copyright |
7 // notice, this list of conditions and the following disclaimer. | 7 // notice, this list of conditions and the following disclaimer. |
8 // * Redistributions in binary form must reproduce the above | 8 // * Redistributions in binary form must reproduce the above |
9 // copyright notice, this list of conditions and the following | 9 // copyright notice, this list of conditions and the following |
10 // disclaimer in the documentation and/or other materials provided | 10 // disclaimer in the documentation and/or other materials provided |
11 // with the distribution. | 11 // with the distribution. |
(...skipping 404 matching lines...) | |
416 Counters::number_of_symbols.Set(symbol_table()->NumberOfElements()); | 416 Counters::number_of_symbols.Set(symbol_table()->NumberOfElements()); |
417 #if defined(DEBUG) || defined(ENABLE_LOGGING_AND_PROFILING) | 417 #if defined(DEBUG) || defined(ENABLE_LOGGING_AND_PROFILING) |
418 ReportStatisticsAfterGC(); | 418 ReportStatisticsAfterGC(); |
419 #endif | 419 #endif |
420 #ifdef ENABLE_DEBUGGER_SUPPORT | 420 #ifdef ENABLE_DEBUGGER_SUPPORT |
421 Debug::AfterGarbageCollection(); | 421 Debug::AfterGarbageCollection(); |
422 #endif | 422 #endif |
423 } | 423 } |
424 | 424 |
425 | 425 |
426 void Heap::CollectAllGarbage(bool force_compaction) { | 426 void Heap::CollectAllGarbage(int flags) { |
427 // Since we are ignoring the return value, the exact choice of space does | 427 // Since we are ignoring the return value, the exact choice of space does |
428 // not matter, so long as we do not specify NEW_SPACE, which would not | 428 // not matter, so long as we do not specify NEW_SPACE, which would not |
429 // cause a full GC. | 429 // cause a full GC. |
430 MarkCompactCollector::SetForceCompaction(force_compaction); | 430 MarkCompactCollector::SetFlags(flags); |
431 CollectGarbage(OLD_POINTER_SPACE); | 431 CollectGarbage(OLD_POINTER_SPACE); |
432 MarkCompactCollector::SetForceCompaction(false); | 432 MarkCompactCollector::SetFlags(kNoGCFlags); |
433 } | 433 } |
434 | 434 |
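The new signature replaces the single force_compaction bool with an int bitmask, so callers can combine independent GC requests. A minimal stand-alone sketch of the pattern; the flag names mirror the patch, but the numeric values and the function body are placeholders, not V8's:

#include <cstdio>

// Placeholder values; the real constants live in V8's heap headers.
enum GCFlags {
  kNoGCFlags           = 0,
  kSweepPreciselyMask  = 1 << 0,
  kForceCompactionMask = 1 << 1
};

static void CollectAllGarbage(int flags) {
  // A real implementation forwards the mask to the collector; here we just
  // decode it to show how the bits combine.
  std::printf("precise sweep: %d, force compaction: %d\n",
              (flags & kSweepPreciselyMask) != 0,
              (flags & kForceCompactionMask) != 0);
}

int main() {
  CollectAllGarbage(kNoGCFlags);                                  // plain full GC
  CollectAllGarbage(kSweepPreciselyMask | kForceCompactionMask);  // aggressive GC
}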
435 | 435 |
436 void Heap::CollectAllAvailableGarbage() { | 436 void Heap::CollectAllAvailableGarbage() { |
437 // Since we are ignoring the return value, the exact choice of space does | 437 // Since we are ignoring the return value, the exact choice of space does |
438 // not matter, so long as we do not specify NEW_SPACE, which would not | 438 // not matter, so long as we do not specify NEW_SPACE, which would not |
439 // cause a full GC. | 439 // cause a full GC. |
440 MarkCompactCollector::SetForceCompaction(true); | 440 MarkCompactCollector::SetFlags(kSweepPreciselyMask | kForceCompactionMask); |
441 | 441 |
442 // Major GC would invoke weak handle callbacks on weakly reachable | 442 // Major GC would invoke weak handle callbacks on weakly reachable |
443 // handles, but won't collect weakly reachable objects until next | 443 // handles, but won't collect weakly reachable objects until next |
444 // major GC. Therefore if we collect aggressively and a weak handle callback | 444 // major GC. Therefore if we collect aggressively and a weak handle callback |
445 // has been invoked, we rerun major GC to release objects which become | 445 // has been invoked, we rerun major GC to release objects which become |
446 // garbage. | 446 // garbage. |
447 // Note: as weak callbacks can execute arbitrary code, we cannot | 447 // Note: as weak callbacks can execute arbitrary code, we cannot |
448 // hope that eventually there will be no weak callback invocations. | 448 // hope that eventually there will be no weak callback invocations. |
449 // Therefore stop recollecting after several attempts. | 449 // Therefore stop recollecting after several attempts. |
450 const int kMaxNumberOfAttempts = 7; | 450 const int kMaxNumberOfAttempts = 7; |
451 for (int attempt = 0; attempt < kMaxNumberOfAttempts; attempt++) { | 451 for (int attempt = 0; attempt < kMaxNumberOfAttempts; attempt++) { |
452 if (!CollectGarbage(OLD_POINTER_SPACE, MARK_COMPACTOR)) { | 452 if (!CollectGarbage(OLD_POINTER_SPACE, MARK_COMPACTOR)) { |
453 break; | 453 break; |
454 } | 454 } |
455 } | 455 } |
456 MarkCompactCollector::SetForceCompaction(false); | 456 MarkCompactCollector::SetFlags(kNoGCFlags); |
457 } | 457 } |
458 | 458 |
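The bounded loop above is a general pattern: rerun a cleanup pass while it reports progress, but cap the attempts because weak callbacks can keep creating work forever. A self-contained model of that control flow (the counter stands in for CollectGarbage and its weak-callback side effects):

#include <cstdio>

static int rounds_with_callbacks = 3;  // pretend three passes invoke callbacks

static bool CollectGarbageModel() {
  // Returns true while weak callbacks ran, i.e. another pass may free more.
  if (rounds_with_callbacks == 0) return false;
  --rounds_with_callbacks;
  return true;
}

int main() {
  const int kMaxNumberOfAttempts = 7;  // no fixed point is guaranteed
  for (int attempt = 0; attempt < kMaxNumberOfAttempts; attempt++) {
    if (!CollectGarbageModel()) break;
    std::printf("attempt %d triggered weak callbacks; retrying\n", attempt);
  }
}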
459 | 459 |
460 bool Heap::CollectGarbage(AllocationSpace space, GarbageCollector collector) { | 460 bool Heap::CollectGarbage(AllocationSpace space, GarbageCollector collector) { |
461 // The VM is in the GC state until exiting this function. | 461 // The VM is in the GC state until exiting this function. |
462 VMState state(GC); | 462 VMState state(GC); |
463 | 463 |
464 #ifdef DEBUG | 464 #ifdef DEBUG |
465 // Reset the allocation timeout to the GC interval, but make sure to | 465 // Reset the allocation timeout to the GC interval, but make sure to |
466 // allow at least a few allocations after a collection. The reason | 466 // allow at least a few allocations after a collection. The reason |
(...skipping 441 matching lines...) | |
908 } | 908 } |
909 } | 909 } |
910 }; | 910 }; |
911 | 911 |
912 | 912 |
913 static void VerifyNonPointerSpacePointers() { | 913 static void VerifyNonPointerSpacePointers() { |
914 // Verify that there are no pointers to new space in spaces where we | 914 // Verify that there are no pointers to new space in spaces where we |
915 // do not expect them. | 915 // do not expect them. |
916 VerifyNonPointerSpacePointersVisitor v; | 916 VerifyNonPointerSpacePointersVisitor v; |
917 HeapObjectIterator code_it(Heap::code_space()); | 917 HeapObjectIterator code_it(Heap::code_space()); |
918 for (HeapObject* object = code_it.next(); | 918 for (HeapObject* object = code_it.Next(); |
919 object != NULL; object = code_it.next()) | 919 object != NULL; object = code_it.Next()) |
920 object->Iterate(&v); | 920 object->Iterate(&v); |
921 | 921 |
922 HeapObjectIterator data_it(Heap::old_data_space()); | 922 // The old data space is normally swept conservatively, so the iterator |
923 for (HeapObject* object = data_it.next(); | 923 // does not work there and we normally skip the next bit. |
924 object != NULL; object = data_it.next()) | 924 if (!Heap::old_data_space()->was_swept_conservatively()) { |
925 object->Iterate(&v); | 925 HeapObjectIterator data_it(Heap::old_data_space()); |
926 for (HeapObject* object = data_it.Next(); | |
927 object != NULL; object = data_it.Next()) | |
928 object->Iterate(&v); | |
929 } | |
926 } | 930 } |
927 #endif | 931 #endif |
928 | 932 |
929 | 933 |
930 void Heap::CheckNewSpaceExpansionCriteria() { | 934 void Heap::CheckNewSpaceExpansionCriteria() { |
931 if (new_space_.Capacity() < new_space_.MaximumCapacity() && | 935 if (new_space_.Capacity() < new_space_.MaximumCapacity() && |
932 survived_since_last_expansion_ > new_space_.Capacity()) { | 936 survived_since_last_expansion_ > new_space_.Capacity()) { |
933 // Grow the size of new space if there is room to grow and enough | 937 // Grow the size of new space if there is room to grow and enough |
934 // data has survived scavenge since the last expansion. | 938 // data has survived scavenge since the last expansion. |
935 new_space_.Grow(); | 939 new_space_.Grow(); |
936 survived_since_last_expansion_ = 0; | 940 survived_since_last_expansion_ = 0; |
937 } | 941 } |
938 } | 942 } |
939 | 943 |
940 | 944 |
941 void Heap::Scavenge() { | 945 void Heap::Scavenge() { |
942 #ifdef DEBUG | 946 #ifdef DEBUG |
943 if (FLAG_enable_slow_asserts) VerifyNonPointerSpacePointers(); | 947 if (FLAG_enable_slow_asserts) VerifyNonPointerSpacePointers(); |
944 #endif | 948 #endif |
945 | 949 |
946 gc_state_ = SCAVENGE; | 950 gc_state_ = SCAVENGE; |
947 | 951 |
948 Page::FlipMeaningOfInvalidatedWatermarkFlag(); | |
949 | |
950 // We do not update an allocation watermark of the top page during linear | |
951 // allocation to avoid overhead. So to maintain the watermark invariant | |
952 // we have to manually cache the watermark and mark the top page as having an | |
953 // invalid watermark. This guarantees that old space pointer iteration will | |
954 // use a correct watermark even if a linear allocation happens. | |
955 old_pointer_space_->FlushTopPageWatermark(); | |
956 map_space_->FlushTopPageWatermark(); | |
957 | |
958 // Implements Cheney's copying algorithm | 952 // Implements Cheney's copying algorithm |
959 LOG(ResourceEvent("scavenge", "begin")); | 953 LOG(ResourceEvent("scavenge", "begin")); |
960 | 954 |
961 // Clear descriptor cache. | 955 // Clear descriptor cache. |
962 DescriptorLookupCache::Clear(); | 956 DescriptorLookupCache::Clear(); |
963 | 957 |
964 // Used for updating survived_since_last_expansion_ at function end. | 958 // Used for updating survived_since_last_expansion_ at function end. |
965 intptr_t survived_watermark = PromotedSpaceSize(); | 959 intptr_t survived_watermark = PromotedSpaceSize(); |
966 | 960 |
967 CheckNewSpaceExpansionCriteria(); | 961 CheckNewSpaceExpansionCriteria(); |
(...skipping 32 matching lines...) | |
1000 IterateRoots(&scavenge_visitor, VISIT_ALL_IN_SCAVENGE); | 994 IterateRoots(&scavenge_visitor, VISIT_ALL_IN_SCAVENGE); |
1001 | 995 |
1002 // Copy objects reachable from the old generation. | 996 // Copy objects reachable from the old generation. |
1003 { | 997 { |
1004 StoreBufferRebuildScope scope; | 998 StoreBufferRebuildScope scope; |
1005 StoreBuffer::IteratePointersToNewSpace(&ScavengeObject); | 999 StoreBuffer::IteratePointersToNewSpace(&ScavengeObject); |
1006 } | 1000 } |
1007 | 1001 |
1008 // Copy objects reachable from cells by scavenging cell values directly. | 1002 // Copy objects reachable from cells by scavenging cell values directly. |
1009 HeapObjectIterator cell_iterator(cell_space_); | 1003 HeapObjectIterator cell_iterator(cell_space_); |
1010 for (HeapObject* cell = cell_iterator.next(); | 1004 for (HeapObject* cell = cell_iterator.Next(); |
1011 cell != NULL; cell = cell_iterator.next()) { | 1005 cell != NULL; cell = cell_iterator.Next()) { |
1012 if (cell->IsJSGlobalPropertyCell()) { | 1006 if (cell->IsJSGlobalPropertyCell()) { |
1013 Address value_address = | 1007 Address value_address = |
1014 reinterpret_cast<Address>(cell) + | 1008 reinterpret_cast<Address>(cell) + |
1015 (JSGlobalPropertyCell::kValueOffset - kHeapObjectTag); | 1009 (JSGlobalPropertyCell::kValueOffset - kHeapObjectTag); |
1016 scavenge_visitor.VisitPointer(reinterpret_cast<Object**>(value_address)); | 1010 scavenge_visitor.VisitPointer(reinterpret_cast<Object**>(value_address)); |
1017 } | 1011 } |
1018 } | 1012 } |
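The value_address computation above is standard V8 tagged-pointer arithmetic: heap object pointers carry a low tag bit, so a field's raw address is pointer + offset - tag. A sketch with an invented offset (kHeapObjectTag really is 1 in V8; kValueOffset here is a placeholder):

#include <cstdint>
#include <cstdio>

const uintptr_t kHeapObjectTag = 1;  // low bit set on heap object pointers
const uintptr_t kValueOffset = 8;    // placeholder field offset

int main() {
  alignas(8) unsigned char object[16] = {0};  // fake heap object storage
  uintptr_t tagged = reinterpret_cast<uintptr_t>(object) + kHeapObjectTag;
  // Untag and add the field offset in one step, as the scavenger does.
  unsigned char* value_address =
      reinterpret_cast<unsigned char*>(tagged + kValueOffset - kHeapObjectTag);
  std::printf("field sits %td bytes into the object\n", value_address - object);
}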
1019 | 1013 |
1020 // Scavenge objects reachable from the global contexts list directly. | 1014 // Scavenge objects reachable from the global contexts list directly. |
1021 scavenge_visitor.VisitPointer(BitCast<Object**>(&global_contexts_list_)); | 1015 scavenge_visitor.VisitPointer(BitCast<Object**>(&global_contexts_list_)); |
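The rest of Scavenge is elided below, but the comment above names Cheney's copying algorithm: evacuate the roots into to-space, then let a scan pointer chase the allocation pointer, copying whatever the scanned objects reference. A stand-alone sketch with an invented object layout (V8's real representation and visitor machinery are very different):

#include <cstddef>
#include <vector>

struct Obj {
  Obj* forward = nullptr;     // forwarding pointer, set once evacuated
  std::vector<Obj*> fields;   // outgoing references
};

static std::vector<Obj*> to_space;

static Obj* Evacuate(Obj* o) {
  if (o == nullptr) return nullptr;
  if (o->forward != nullptr) return o->forward;  // already copied
  Obj* copy = new Obj;
  copy->fields = o->fields;    // shallow copy; fixed up by the scan below
  o->forward = copy;
  to_space.push_back(copy);    // bumps the allocation pointer
  return copy;
}

static void CheneyScavenge(std::vector<Obj*>& roots) {
  to_space.clear();
  for (Obj*& root : roots) root = Evacuate(root);
  // The scan pointer chases the allocation pointer; objects appended by
  // Evacuate are themselves scanned, so this loop is the whole traversal.
  for (size_t scan = 0; scan < to_space.size(); scan++) {
    for (Obj*& field : to_space[scan]->fields) field = Evacuate(field);
  }
}

int main() {
  Obj a, b;
  a.fields.push_back(&b);
  std::vector<Obj*> roots = {&a};
  CheneyScavenge(roots);  // roots[0] now points at the to-space copy of a
}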
(...skipping 2733 matching lines...) | |
3755 (size > MaxObjectSizeInPagedSpace()) ? LO_SPACE : OLD_POINTER_SPACE; | 3749 (size > MaxObjectSizeInPagedSpace()) ? LO_SPACE : OLD_POINTER_SPACE; |
3756 Object* result; | 3750 Object* result; |
3757 { MaybeObject* maybe_result = Heap::Allocate(map, space); | 3751 { MaybeObject* maybe_result = Heap::Allocate(map, space); |
3758 if (!maybe_result->ToObject(&result)) return maybe_result; | 3752 if (!maybe_result->ToObject(&result)) return maybe_result; |
3759 } | 3753 } |
3760 Struct::cast(result)->InitializeBody(size); | 3754 Struct::cast(result)->InitializeBody(size); |
3761 return result; | 3755 return result; |
3762 } | 3756 } |
3763 | 3757 |
3764 | 3758 |
3759 void Heap::EnsureHeapIsIterable() { | |
3760 ASSERT(IsAllocationAllowed()); | |
3761 if (IncrementalMarking::state() != IncrementalMarking::STOPPED || | |
3762 old_pointer_space()->was_swept_conservatively() || | |
3763 old_data_space()->was_swept_conservatively()) { | |
3764 CollectAllGarbage(kSweepPreciselyMask); | |
3765 } | |
3766 ASSERT(!old_pointer_space()->was_swept_conservatively()); | |
3767 ASSERT(!old_data_space()->was_swept_conservatively()); | |
3768 } | |
3769 | |
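The new EnsureHeapIsIterable is an idempotent guard: if any condition that breaks the "heap is iterable" invariant holds, pay for one precise collection, then assert the invariant. A generic model of that shape (names invented):

#include <cassert>

static bool swept_conservatively = true;  // pretend the heap is not iterable

static void CollectAllGarbagePrecisely() { swept_conservatively = false; }

static void EnsureIterable() {
  if (swept_conservatively) {
    CollectAllGarbagePrecisely();  // one precise sweep restores the invariant
  }
  assert(!swept_conservatively);
}

int main() {
  EnsureIterable();  // expensive at most once...
  EnsureIterable();  // ...later calls are cheap no-ops
}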
3770 | |
3765 bool Heap::IdleNotification() { | 3771 bool Heap::IdleNotification() { |
3766 static const int kIdlesBeforeScavenge = 4; | 3772 static const int kIdlesBeforeScavenge = 4; |
3767 static const int kIdlesBeforeMarkSweep = 7; | 3773 static const int kIdlesBeforeMarkSweep = 7; |
3768 static const int kIdlesBeforeMarkCompact = 8; | 3774 static const int kIdlesBeforeMarkCompact = 8; |
3769 static const int kMaxIdleCount = kIdlesBeforeMarkCompact + 1; | 3775 static const int kMaxIdleCount = kIdlesBeforeMarkCompact + 1; |
3770 static const int kGCsBetweenCleanup = 4; | 3776 static const int kGCsBetweenCleanup = 4; |
3771 static int number_idle_notifications = 0; | 3777 static int number_idle_notifications = 0; |
3772 static int last_gc_count = gc_count_; | 3778 static int last_gc_count = gc_count_; |
3773 | 3779 |
3774 bool uncommit = true; | 3780 bool uncommit = true; |
3775 bool finished = false; | 3781 bool finished = false; |
3776 | 3782 |
3777 // Reset the number of idle notifications received when a number of | 3783 // Reset the number of idle notifications received when a number of |
3778 // GCs have taken place. This allows another round of cleanup based | 3784 // GCs have taken place. This allows another round of cleanup based |
3779 // on idle notifications if enough work has been carried out to | 3785 // on idle notifications if enough work has been carried out to |
3780 // provoke a number of garbage collections. | 3786 // provoke a number of garbage collections. |
3781 if (gc_count_ < last_gc_count + kGCsBetweenCleanup) { | 3787 if (gc_count_ < last_gc_count + kGCsBetweenCleanup) { |
3782 number_idle_notifications = | 3788 number_idle_notifications = |
3783 Min(number_idle_notifications + 1, kMaxIdleCount); | 3789 Min(number_idle_notifications + 1, kMaxIdleCount); |
3784 } else { | 3790 } else { |
3785 number_idle_notifications = 0; | 3791 number_idle_notifications = 0; |
3786 last_gc_count = gc_count_; | 3792 last_gc_count = gc_count_; |
3787 } | 3793 } |
3788 | 3794 |
3789 if (number_idle_notifications == kIdlesBeforeScavenge) { | 3795 if (number_idle_notifications == kIdlesBeforeScavenge) { |
3790 if (contexts_disposed_ > 0) { | 3796 if (contexts_disposed_ > 0) { |
3791 HistogramTimerScope scope(&Counters::gc_context); | 3797 HistogramTimerScope scope(&Counters::gc_context); |
3792 CollectAllGarbage(false); | 3798 CollectAllGarbage(kNoGCFlags); |
3793 } else { | 3799 } else { |
3794 CollectGarbage(NEW_SPACE); | 3800 CollectGarbage(NEW_SPACE); |
3795 } | 3801 } |
3796 new_space_.Shrink(); | 3802 new_space_.Shrink(); |
3797 last_gc_count = gc_count_; | 3803 last_gc_count = gc_count_; |
3798 } else if (number_idle_notifications == kIdlesBeforeMarkSweep) { | 3804 } else if (number_idle_notifications == kIdlesBeforeMarkSweep) { |
3799 // Before doing the mark-sweep collections we clear the | 3805 // Before doing the mark-sweep collections we clear the |
3800 // compilation cache to avoid hanging on to source code and | 3806 // compilation cache to avoid hanging on to source code and |
3801 // generated code for cached functions. | 3807 // generated code for cached functions. |
3802 CompilationCache::Clear(); | 3808 CompilationCache::Clear(); |
3803 | 3809 |
3804 CollectAllGarbage(false); | 3810 CollectAllGarbage(kNoGCFlags); |
3805 new_space_.Shrink(); | 3811 new_space_.Shrink(); |
3806 last_gc_count = gc_count_; | 3812 last_gc_count = gc_count_; |
3807 | 3813 |
3808 } else if (number_idle_notifications == kIdlesBeforeMarkCompact) { | 3814 } else if (number_idle_notifications == kIdlesBeforeMarkCompact) { |
3809 CollectAllGarbage(true); | 3815 CollectAllGarbage(kForceCompactionMask); |
3810 new_space_.Shrink(); | 3816 new_space_.Shrink(); |
3811 last_gc_count = gc_count_; | 3817 last_gc_count = gc_count_; |
3812 finished = true; | 3818 finished = true; |
3813 | 3819 |
3814 } else if (contexts_disposed_ > 0) { | 3820 } else if (contexts_disposed_ > 0) { |
3815 if (FLAG_expose_gc) { | 3821 if (FLAG_expose_gc) { |
3816 contexts_disposed_ = 0; | 3822 contexts_disposed_ = 0; |
3817 } else { | 3823 } else { |
3818 HistogramTimerScope scope(&Counters::gc_context); | 3824 HistogramTimerScope scope(&Counters::gc_context); |
3819 CollectAllGarbage(false); | 3825 CollectAllGarbage(kNoGCFlags); |
3820 last_gc_count = gc_count_; | 3826 last_gc_count = gc_count_; |
3821 } | 3827 } |
3822 // If this is the first idle notification, we reset the | 3828 // If this is the first idle notification, we reset the |
3823 // notification count to avoid letting idle notifications for | 3829 // notification count to avoid letting idle notifications for |
3824 // context disposal garbage collections start a potentially too | 3830 // context disposal garbage collections start a potentially too |
3825 // aggressive idle GC cycle. | 3831 // aggressive idle GC cycle. |
3826 if (number_idle_notifications <= 1) { | 3832 if (number_idle_notifications <= 1) { |
3827 number_idle_notifications = 0; | 3833 number_idle_notifications = 0; |
3828 uncommit = false; | 3834 uncommit = false; |
3829 } | 3835 } |
(...skipping 123 matching lines...) | |
3953 | 3959 |
3954 #ifdef DEBUG | 3960 #ifdef DEBUG |
3955 static void DummyScavengePointer(HeapObject** p, HeapObject* o) { | 3961 static void DummyScavengePointer(HeapObject** p, HeapObject* o) { |
3956 // When we are not in GC the Heap::InNewSpace() predicate | 3962 // When we are not in GC the Heap::InNewSpace() predicate |
3957 // checks that pointers which satisfy predicate point into | 3963 // checks that pointers which satisfy predicate point into |
3958 // the active semispace. | 3964 // the active semispace. |
3959 Heap::InNewSpace(*p); | 3965 Heap::InNewSpace(*p); |
3960 } | 3966 } |
3961 | 3967 |
3962 | 3968 |
3963 static void VerifyPointersUnderWatermark( | 3969 static void VerifyPointers( |
3964 PagedSpace* space, | 3970 PagedSpace* space, |
3965 PointerRegionCallback visit_pointer_region) { | 3971 PointerRegionCallback visit_pointer_region) { |
3966 PageIterator it(space, PageIterator::PAGES_IN_USE); | 3972 PageIterator it(space); |
3967 | 3973 |
3968 while (it.has_next()) { | 3974 while (it.has_next()) { |
3969 Page* page = it.next(); | 3975 Page* page = it.next(); |
3970 Address start = page->ObjectAreaStart(); | 3976 Address start = page->ObjectAreaStart(); |
3971 Address end = page->AllocationWatermark(); | 3977 Address end = page->ObjectAreaEnd(); |
3972 | 3978 |
3973 Heap::IteratePointersToNewSpace(start, | 3979 Heap::IteratePointersToNewSpace(start, |
3974 end, | 3980 end, |
3975 &DummyScavengePointer); | 3981 &DummyScavengePointer); |
3976 } | 3982 } |
3977 } | 3983 } |
3978 | 3984 |
3979 | 3985 |
3980 static void VerifyPointersUnderWatermark(LargeObjectSpace* space) { | 3986 static void VerifyPointers(LargeObjectSpace* space) { |
3981 LargeObjectIterator it(space); | 3987 LargeObjectIterator it(space); |
3982 for (HeapObject* object = it.next(); object != NULL; object = it.next()) { | 3988 for (HeapObject* object = it.next(); object != NULL; object = it.next()) { |
3983 if (object->IsFixedArray()) { | 3989 if (object->IsFixedArray()) { |
3984 Address slot_address = object->address(); | 3990 Address slot_address = object->address(); |
3985 Address end = object->address() + object->Size(); | 3991 Address end = object->address() + object->Size(); |
3986 | 3992 |
3987 while (slot_address < end) { | 3993 while (slot_address < end) { |
3988 HeapObject** slot = reinterpret_cast<HeapObject**>(slot_address); | 3994 HeapObject** slot = reinterpret_cast<HeapObject**>(slot_address); |
3989 // When we are not in GC the Heap::InNewSpace() predicate | 3995 // When we are not in GC the Heap::InNewSpace() predicate |
3990 // checks that pointers which satisfy predicate point into | 3996 // checks that pointers which satisfy predicate point into |
(...skipping 12 matching lines...) | |
4003 StoreBuffer::Verify(); | 4009 StoreBuffer::Verify(); |
4004 | 4010 |
4005 VerifyPointersVisitor visitor; | 4011 VerifyPointersVisitor visitor; |
4006 IterateRoots(&visitor, VISIT_ONLY_STRONG); | 4012 IterateRoots(&visitor, VISIT_ONLY_STRONG); |
4007 | 4013 |
4008 new_space_.Verify(); | 4014 new_space_.Verify(); |
4009 | 4015 |
4010 old_pointer_space_->Verify(&visitor); | 4016 old_pointer_space_->Verify(&visitor); |
4011 map_space_->Verify(&visitor); | 4017 map_space_->Verify(&visitor); |
4012 | 4018 |
4013 VerifyPointersUnderWatermark(old_pointer_space_, | 4019 VerifyPointers(old_pointer_space_, &IteratePointersToNewSpace); |
4014 &IteratePointersToNewSpace); | 4020 VerifyPointers(map_space_, &IteratePointersFromMapsToNewSpace); |
4015 VerifyPointersUnderWatermark(map_space_, | 4021 VerifyPointers(lo_space_); |
4016 &IteratePointersFromMapsToNewSpace); | |
4017 VerifyPointersUnderWatermark(lo_space_); | |
4018 | 4022 |
4019 VerifyPointersVisitor no_dirty_regions_visitor; | 4023 VerifyPointersVisitor no_dirty_regions_visitor; |
4020 old_data_space_->Verify(&no_dirty_regions_visitor); | 4024 old_data_space_->Verify(&no_dirty_regions_visitor); |
4021 code_space_->Verify(&no_dirty_regions_visitor); | 4025 code_space_->Verify(&no_dirty_regions_visitor); |
4022 cell_space_->Verify(&no_dirty_regions_visitor); | 4026 cell_space_->Verify(&no_dirty_regions_visitor); |
4023 | 4027 |
4024 lo_space_->Verify(); | 4028 lo_space_->Verify(); |
4025 } | 4029 } |
4026 #endif // DEBUG | 4030 #endif // DEBUG |
4027 | 4031 |
(...skipping 175 matching lines...) | |
4203 ASSERT(StoreBuffer::CellIsInStoreBuffer( | 4207 ASSERT(StoreBuffer::CellIsInStoreBuffer( |
4204 reinterpret_cast<Address>(slot))); | 4208 reinterpret_cast<Address>(slot))); |
4205 } | 4209 } |
4206 } | 4210 } |
4207 slot_address += kPointerSize; | 4211 slot_address += kPointerSize; |
4208 } | 4212 } |
4209 } | 4213 } |
4210 | 4214 |
4211 | 4215 |
4212 #ifdef DEBUG | 4216 #ifdef DEBUG |
4217 typedef bool (*CheckStoreBufferFilter)(Object**addr); | |
Vyacheslav Egorov (Chromium), 2011/03/15 09:20:09: space after **
Erik Corry, 2011/03/17 13:39:17: Done.
4218 | |
4219 | |
4220 bool IsAMapPointerAddress(Object** addr) { | |
4221 uintptr_t a = reinterpret_cast<uintptr_t>(addr); | |
4222 int mod = a % Map::kSize; | |
4223 return mod >= Map::kPointerFieldsBeginOffset && | |
4224 mod < Map:: kPointerFieldsEndOffset; | |
Vyacheslav Egorov (Chromium), 2011/03/15 09:20:09: remove space after ::
Erik Corry, 2011/03/17 13:39:17: Done.
4225 } | |
4226 | |
4227 | |
4228 bool EverythingsAPointer(Object** addr) { | |
4229 return true; | |
4230 } | |
4231 | |
4232 | |
4213 static void CheckStoreBuffer(Object** current, | 4233 static void CheckStoreBuffer(Object** current, |
4214 Object** limit, | 4234 Object** limit, |
4215 Object**** store_buffer_position, | 4235 Object**** store_buffer_position, |
4216 Object*** store_buffer_top) { | 4236 Object*** store_buffer_top, |
4237 CheckStoreBufferFilter filter, | |
4238 Address special_garbage_start, | |
4239 Address special_garbage_end) { | |
4217 for ( ; current < limit; current++) { | 4240 for ( ; current < limit; current++) { |
4218 Object* o = *current; | 4241 Object* o = *current; |
4219 if (reinterpret_cast<uintptr_t>(o) == kFreeListZapValue) { | 4242 Address current_address = reinterpret_cast<Address>(current); |
4220 Object*** zap_checker = *store_buffer_position; | 4243 // Skip free space that is marked by byte arrays. |
4221 while (*zap_checker < current) { | 4244 if (o == Heap::byte_array_map()) { |
4222 zap_checker++; | 4245 Address current_address = reinterpret_cast<Address>(current); |
4223 if (zap_checker >= store_buffer_top) break; | 4246 ByteArray* free_space = |
4224 } | 4247 ByteArray::cast(HeapObject::FromAddress(current_address)); |
4225 if (zap_checker < store_buffer_top) { | 4248 int skip = free_space->Size(); |
4226 // Objects in the free list shouldn't be in the store buffer. | 4249 ASSERT(current_address + skip <= reinterpret_cast<Address>(limit)); |
4227 ASSERT(*zap_checker != current); | 4250 ASSERT(skip > 0); |
4228 } | 4251 current_address += skip - kPointerSize; |
4252 current = reinterpret_cast<Object**>(current_address); | |
4229 continue; | 4253 continue; |
4230 } | 4254 } |
4255 // Skip the current linear allocation space between top and limit which is | |
4256 // unmarked by byte arrays, but can contain junk. | |
4257 if (current_address == special_garbage_start && | |
4258 special_garbage_end != special_garbage_start) { | |
4259 current_address = special_garbage_end - kPointerSize; | |
4260 current = reinterpret_cast<Object**>(current_address); | |
4261 continue; | |
4262 } | |
4263 if (!(*filter)(current)) continue; | |
4264 ASSERT(current_address < special_garbage_start || | |
4265 current_address >= special_garbage_end); | |
4266 ASSERT(reinterpret_cast<uintptr_t>(o) != kFreeListZapValue); | |
4231 // We have to check that the pointer does not point into new space | 4267 // We have to check that the pointer does not point into new space |
4232 // without trying to cast it to a heap object since the hash field of | 4268 // without trying to cast it to a heap object since the hash field of |
4233 // a string can contain values like 1 and 3 which are tagged null | 4269 // a string can contain values like 1 and 3 which are tagged null |
4234 // pointers. | 4270 // pointers. |
4235 if (!Heap::InNewSpace(o)) continue; | 4271 if (!Heap::InNewSpace(o)) continue; |
4236 while (**store_buffer_position < current && | 4272 while (**store_buffer_position < current && |
4237 *store_buffer_position < store_buffer_top) { | 4273 *store_buffer_position < store_buffer_top) { |
4238 (*store_buffer_position)++; | 4274 (*store_buffer_position)++; |
4239 } | 4275 } |
4240 if (**store_buffer_position != current || | 4276 if (**store_buffer_position != current || |
4241 *store_buffer_position == store_buffer_top) { | 4277 *store_buffer_position == store_buffer_top) { |
4242 Object** obj_start = current; | 4278 Object** obj_start = current; |
4243 while (!(*obj_start)->IsMap()) obj_start--; | 4279 while (!(*obj_start)->IsMap()) obj_start--; |
4244 UNREACHABLE(); | 4280 UNREACHABLE(); |
4245 } | 4281 } |
4246 } | 4282 } |
4247 } | 4283 } |
4248 | 4284 |
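CheckStoreBuffer works because both sequences are sorted: the slots are walked in address order and the callers run StoreBuffer::SortUniq() first, so one forward cursor into the store buffer serves every slot. A minimal model of that merge-style membership scan, with integers standing in for slot addresses:

#include <cassert>
#include <cstddef>
#include <vector>

// True iff every element of 'required' appears in the sorted 'buffer'.
static bool AllPresent(const std::vector<int>& required,
                       const std::vector<int>& buffer) {
  size_t pos = 0;  // plays the role of store_buffer_position
  for (int slot : required) {
    while (pos < buffer.size() && buffer[pos] < slot) pos++;  // advance cursor
    if (pos == buffer.size() || buffer[pos] != slot) return false;  // UNREACHABLE
  }
  return true;
}

int main() {
  std::vector<int> store_buffer = {8, 16, 40};  // sorted and deduplicated
  assert(AllPresent({8, 40}, store_buffer));
  assert(!AllPresent({8, 24}, store_buffer));   // 24 missing: verification fails
}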
4249 | 4285 |
4250 // Check that the store buffer contains all intergenerational pointers by | 4286 // Check that the store buffer contains all intergenerational pointers by |
4251 // scanning a page and ensuring that all pointers to young space are in the | 4287 // scanning a page and ensuring that all pointers to young space are in the |
4252 // store buffer. | 4288 // store buffer. |
4253 void Heap::OldPointerSpaceCheckStoreBuffer( | 4289 void Heap::OldPointerSpaceCheckStoreBuffer( |
4254 ExpectedPageWatermarkState watermark_state) { | 4290 ExpectedPageWatermarkState watermark_state) { |
4255 OldSpace* space = old_pointer_space(); | 4291 OldSpace* space = old_pointer_space(); |
4256 PageIterator pages(space, PageIterator::PAGES_IN_USE); | 4292 PageIterator pages(space); |
4257 | |
4258 space->free_list()->Zap(); | |
4259 | 4293 |
4260 StoreBuffer::SortUniq(); | 4294 StoreBuffer::SortUniq(); |
4261 | 4295 |
4262 while (pages.has_next()) { | 4296 while (pages.has_next()) { |
4263 Page* page = pages.next(); | 4297 Page* page = pages.next(); |
4264 Object** current = reinterpret_cast<Object**>(page->ObjectAreaStart()); | 4298 Object** current = reinterpret_cast<Object**>(page->ObjectAreaStart()); |
4265 | 4299 |
4266 // Do not try to visit pointers beyond page allocation watermark. | 4300 Address end = page->ObjectAreaEnd(); |
4267 // Page can contain garbage pointers there. | |
4268 Address end; | |
4269 | |
4270 if (watermark_state == WATERMARK_SHOULD_BE_VALID || | |
4271 page->IsWatermarkValid()) { | |
4272 end = page->AllocationWatermark(); | |
4273 } else { | |
4274 end = page->CachedAllocationWatermark(); | |
4275 } | |
4276 | 4301 |
4277 Object*** store_buffer_position = StoreBuffer::Start(); | 4302 Object*** store_buffer_position = StoreBuffer::Start(); |
4278 Object*** store_buffer_top = StoreBuffer::Top(); | 4303 Object*** store_buffer_top = StoreBuffer::Top(); |
4279 | 4304 |
4280 Object** limit = reinterpret_cast<Object**>(end); | 4305 Object** limit = reinterpret_cast<Object**>(end); |
4281 CheckStoreBuffer(current, limit, &store_buffer_position, store_buffer_top); | 4306 CheckStoreBuffer(current, |
4307 limit, | |
4308 &store_buffer_position, | |
4309 store_buffer_top, | |
4310 &EverythingsAPointer, | |
4311 space->top(), | |
4312 space->limit()); | |
4282 } | 4313 } |
4283 } | 4314 } |
4284 | 4315 |
4285 | 4316 |
4286 void Heap::MapSpaceCheckStoreBuffer( | 4317 void Heap::MapSpaceCheckStoreBuffer( |
4287 ExpectedPageWatermarkState watermark_state) { | 4318 ExpectedPageWatermarkState watermark_state) { |
4288 MapSpace* space = map_space(); | 4319 MapSpace* space = map_space(); |
4289 PageIterator pages(space, PageIterator::PAGES_IN_USE); | 4320 PageIterator pages(space); |
4290 | |
4291 space->free_list()->Zap(); | |
4292 | 4321 |
4293 StoreBuffer::SortUniq(); | 4322 StoreBuffer::SortUniq(); |
4294 | 4323 |
4295 while (pages.has_next()) { | 4324 while (pages.has_next()) { |
4296 Page* page = pages.next(); | 4325 Page* page = pages.next(); |
4326 Object** current = reinterpret_cast<Object**>(page->ObjectAreaStart()); | |
4297 | 4327 |
4298 // Do not try to visit pointers beyond page allocation watermark. | 4328 Address end = page->ObjectAreaEnd(); |
4299 // Page can contain garbage pointers there. | |
4300 Address end; | |
4301 | |
4302 if (watermark_state == WATERMARK_SHOULD_BE_VALID || | |
4303 page->IsWatermarkValid()) { | |
4304 end = page->AllocationWatermark(); | |
4305 } else { | |
4306 end = page->CachedAllocationWatermark(); | |
4307 } | |
4308 | |
4309 Address map_aligned_current = page->ObjectAreaStart(); | |
4310 | |
4311 ASSERT(map_aligned_current == MapStartAlign(map_aligned_current)); | |
4312 ASSERT(end == MapEndAlign(end)); | |
4313 | 4329 |
4314 Object*** store_buffer_position = StoreBuffer::Start(); | 4330 Object*** store_buffer_position = StoreBuffer::Start(); |
4315 Object*** store_buffer_top = StoreBuffer::Top(); | 4331 Object*** store_buffer_top = StoreBuffer::Top(); |
4316 | 4332 |
4317 for ( ; map_aligned_current < end; map_aligned_current += Map::kSize) { | 4333 Object** limit = reinterpret_cast<Object**>(end); |
4318 ASSERT(!Heap::InNewSpace(Memory::Object_at(map_aligned_current))); | 4334 CheckStoreBuffer(current, |
4319 ASSERT(Memory::Object_at(map_aligned_current)->IsMap()); | 4335 limit, |
4320 | 4336 &store_buffer_position, |
4321 Object** current = reinterpret_cast<Object**>( | 4337 store_buffer_top, |
4322 map_aligned_current + Map::kPointerFieldsBeginOffset); | 4338 &IsAMapPointerAddress, |
4323 Object** limit = reinterpret_cast<Object**>( | 4339 space->top(), |
4324 map_aligned_current + Map::kPointerFieldsEndOffset); | 4340 space->limit()); |
4325 | |
4326 CheckStoreBuffer(current, | |
4327 limit, | |
4328 &store_buffer_position, | |
4329 store_buffer_top); | |
4330 } | |
4331 } | 4341 } |
4332 } | 4342 } |
4333 | 4343 |
4334 | 4344 |
4335 void Heap::LargeObjectSpaceCheckStoreBuffer() { | 4345 void Heap::LargeObjectSpaceCheckStoreBuffer() { |
4336 LargeObjectIterator it(lo_space()); | 4346 LargeObjectIterator it(lo_space()); |
4337 for (HeapObject* object = it.next(); object != NULL; object = it.next()) { | 4347 for (HeapObject* object = it.next(); object != NULL; object = it.next()) { |
4338 // We only have code, sequential strings, or fixed arrays in large | 4348 // We only have code, sequential strings, or fixed arrays in large |
4339 // object space, and only fixed arrays can possibly contain pointers to | 4349 // object space, and only fixed arrays can possibly contain pointers to |
4340 // the young generation. | 4350 // the young generation. |
4341 if (object->IsFixedArray()) { | 4351 if (object->IsFixedArray()) { |
4342 Object*** store_buffer_position = StoreBuffer::Start(); | 4352 Object*** store_buffer_position = StoreBuffer::Start(); |
4343 Object*** store_buffer_top = StoreBuffer::Top(); | 4353 Object*** store_buffer_top = StoreBuffer::Top(); |
4344 Object** current = reinterpret_cast<Object**>(object->address()); | 4354 Object** current = reinterpret_cast<Object**>(object->address()); |
4345 Object** limit = | 4355 Object** limit = |
4346 reinterpret_cast<Object**>(object->address() + object->Size()); | 4356 reinterpret_cast<Object**>(object->address() + object->Size()); |
4347 CheckStoreBuffer(current, | 4357 CheckStoreBuffer(current, |
4348 limit, | 4358 limit, |
4349 &store_buffer_position, | 4359 &store_buffer_position, |
4350 store_buffer_top); | 4360 store_buffer_top, |
4361 &EverythingsAPointer, | |
4362 NULL, | |
4363 NULL); | |
4351 } | 4364 } |
4352 } | 4365 } |
4353 } | 4366 } |
4354 | 4367 |
4355 | 4368 |
4356 #endif | 4369 #endif |
4357 | 4370 |
4358 | 4371 |
4372 // This function iterates over all the pointers in a paged space in the heap, | |
Vyacheslav Egorov (Chromium), 2011/03/15 09:20:09: Special garbage section is utterly confusing. May
Erik Corry, 2011/03/17 13:39:17: It's hard to draw in ASCII art, but I can do it fo
4373 // looking for pointers into new space. Within the pages there may be dead | |
4374 // objects that have not been overwritten by byte arrays or fillers because of | |
4375 // lazy sweeping. These dead objects may not contain pointers to new space. | |
4376 // The garbage areas that have been swept properly (these will normally be the | |
4377 // large ones) will be marked with byte array and filler map words. In | |
4378 // addition any area that has never been used at all for object allocation must | |
4379 // be marked with a byte array or filler. Because the byte array and filler | |
4380 // maps do not move we can always recognize these even after a compaction. | |
4381 // Normal objects like FixedArrays and JSObjects should not contain references | |
4382 // to these maps. The special garbage section (see comment in spaces.h) is | |
4383 // skipped since it can contain absolutely anything. Any objects that are | |
4384 // allocated during iteration may or may not be visited by the iteration, but | |
4385 // they will not be partially visited. | |
4359 void Heap::IteratePointers( | 4386 void Heap::IteratePointers( |
4360 PagedSpace* space, | 4387 PagedSpace* space, |
4361 PointerRegionCallback visit_pointer_region, | 4388 PointerRegionCallback visit_pointer_region, |
4362 ObjectSlotCallback copy_object_func, | 4389 ObjectSlotCallback copy_object_func, |
4363 ExpectedPageWatermarkState expected_page_watermark_state) { | 4390 ExpectedPageWatermarkState expected_page_watermark_state) { |
4364 | 4391 |
4365 PageIterator pages(space, PageIterator::PAGES_IN_USE); | 4392 PageIterator pages(space); |
4366 | 4393 |
4367 while (pages.has_next()) { | 4394 while (pages.has_next()) { |
4368 Page* page = pages.next(); | 4395 Page* page = pages.next(); |
4369 Address start = page->ObjectAreaStart(); | 4396 Address start = page->ObjectAreaStart(); |
4397 Address limit = page->ObjectAreaEnd(); | |
4370 | 4398 |
4371 // Do not try to visit pointers beyond page allocation watermark. | 4399 Address end = start; |
4372 // Page can contain garbage pointers there. | |
4373 Address end; | |
4374 | 4400 |
4375 if ((expected_page_watermark_state == WATERMARK_SHOULD_BE_VALID) || | 4401 Object* byte_array_map = Heap::byte_array_map(); |
4376 page->IsWatermarkValid()) { | 4402 Object* two_pointer_filler_map = Heap::two_pointer_filler_map(); |
4377 end = page->AllocationWatermark(); | 4403 |
4378 } else { | 4404 while (end < limit) { |
4379 end = page->CachedAllocationWatermark(); | 4405 Object* o = *reinterpret_cast<Object**>(end); |
4406 // Skip fillers but not things that look like fillers in the special | |
4407 // garbage section which can contain anything. | |
4408 if (o == byte_array_map || | |
4409 o == two_pointer_filler_map || | |
4410 end == space->top()) { | |
4411 if (start != end) { | |
4412 // After calling this the special garbage section may have moved. | |
4413 visit_pointer_region(start, end, copy_object_func); | |
4414 if (end >= space->top() && end < space->limit()) { | |
4415 end = space->limit(); | |
4416 start = end; | |
4417 continue; | |
4418 } | |
4419 } | |
4420 if (end == space->top()) { | |
4421 start = end = space->limit(); | |
4422 } else { | |
4423 // At this point we are either at the start of a filler or we are at | |
4424 // the point where the space->top() used to be before the | |
4425 // visit_pointer_region call above. Either way we can skip the | |
4426 // object at the current spot: We don't promise to visit objects | |
4427 // allocated during heap traversal, and if space->top() moved then it | |
4428 // must be because an object was allocated at this point. | |
4429 start = end + HeapObject::FromAddress(end)->Size(); | |
4430 end = start; | |
4431 } | |
4432 } else { | |
4433 ASSERT(o != byte_array_map); | |
4434 ASSERT(o != two_pointer_filler_map); | |
4435 ASSERT(end < space->top() || end >= space->limit()); | |
4436 end += kPointerSize; | |
4437 } | |
4380 } | 4438 } |
4381 | 4439 ASSERT(end == limit); |
4382 ASSERT(space == old_pointer_space_ || | 4440 if (start != end) { |
4383 (space == map_space_ && | 4441 visit_pointer_region(start, end, copy_object_func); |
4384 ((page->ObjectAreaStart() - end) % Map::kSize == 0))); | 4442 } |
4385 | |
4386 visit_pointer_region(start, end, copy_object_func); | |
4387 | |
4388 // Mark page watermark as invalid to maintain watermark validity invariant. | |
4389 // See Page::FlipMeaningOfInvalidatedWatermarkFlag() for details. | |
4390 page->InvalidateWatermark(true); | |
4391 } | 4443 } |
4392 } | 4444 } |
4393 | 4445 |
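The skipping logic above relies on swept free regions carrying a recognizable map word plus their size, so the scan can hop over them in one step instead of visiting every dead word. A stand-alone sketch of that scan over a flat word array (the sentinel and layout are invented):

#include <cstdio>

const int kFiller = -1;  // stands in for the byte array / filler map words

int main() {
  // Word stream: [payload 3] [filler covering 2 words] [payload 7].
  int heap[] = {3, kFiller, 2, 7};
  const int n = sizeof(heap) / sizeof(heap[0]);
  for (int i = 0; i < n; ) {
    if (heap[i] == kFiller) {
      i += heap[i + 1];  // recorded size: skip the whole free region at once
    } else {
      std::printf("visit slot %d -> %d\n", i, heap[i]);  // potential pointer
      i++;
    }
  }
}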
4394 | 4446 |
4395 void Heap::IterateRoots(ObjectVisitor* v, VisitMode mode) { | 4447 void Heap::IterateRoots(ObjectVisitor* v, VisitMode mode) { |
4396 IterateStrongRoots(v, mode); | 4448 IterateStrongRoots(v, mode); |
4397 IterateWeakRoots(v, mode); | 4449 IterateWeakRoots(v, mode); |
4398 } | 4450 } |
4399 | 4451 |
4400 | 4452 |
(...skipping 146 matching lines...) | |
4547 *stats->map_space_capacity = map_space_->Capacity(); | 4599 *stats->map_space_capacity = map_space_->Capacity(); |
4548 *stats->cell_space_size = cell_space_->Size(); | 4600 *stats->cell_space_size = cell_space_->Size(); |
4549 *stats->cell_space_capacity = cell_space_->Capacity(); | 4601 *stats->cell_space_capacity = cell_space_->Capacity(); |
4550 *stats->lo_space_size = lo_space_->Size(); | 4602 *stats->lo_space_size = lo_space_->Size(); |
4551 GlobalHandles::RecordStats(stats); | 4603 GlobalHandles::RecordStats(stats); |
4552 *stats->memory_allocator_size = MemoryAllocator::Size(); | 4604 *stats->memory_allocator_size = MemoryAllocator::Size(); |
4553 *stats->memory_allocator_capacity = | 4605 *stats->memory_allocator_capacity = |
4554 MemoryAllocator::Size() + MemoryAllocator::Available(); | 4606 MemoryAllocator::Size() + MemoryAllocator::Available(); |
4555 *stats->os_error = OS::GetLastError(); | 4607 *stats->os_error = OS::GetLastError(); |
4556 if (take_snapshot) { | 4608 if (take_snapshot) { |
4557 HeapIterator iterator(HeapIterator::kFilterFreeListNodes); | 4609 HeapIterator iterator; |
4558 for (HeapObject* obj = iterator.next(); | 4610 for (HeapObject* obj = iterator.Next(); |
4559 obj != NULL; | 4611 obj != NULL; |
4560 obj = iterator.next()) { | 4612 obj = iterator.Next()) { |
4561 InstanceType type = obj->map()->instance_type(); | 4613 InstanceType type = obj->map()->instance_type(); |
4562 ASSERT(0 <= type && type <= LAST_TYPE); | 4614 ASSERT(0 <= type && type <= LAST_TYPE); |
4563 stats->objects_per_type[type]++; | 4615 stats->objects_per_type[type]++; |
4564 stats->size_per_type[type] += obj->Size(); | 4616 stats->size_per_type[type] += obj->Size(); |
4565 } | 4617 } |
4566 } | 4618 } |
4567 } | 4619 } |
4568 | 4620 |
4569 | 4621 |
4570 intptr_t Heap::PromotedSpaceSize() { | 4622 intptr_t Heap::PromotedSpaceSize() { |
(...skipping 410 matching lines...) | |
4981 } | 5033 } |
4982 | 5034 |
4983 | 5035 |
4984 class HeapObjectsFilter { | 5036 class HeapObjectsFilter { |
4985 public: | 5037 public: |
4986 virtual ~HeapObjectsFilter() {} | 5038 virtual ~HeapObjectsFilter() {} |
4987 virtual bool SkipObject(HeapObject* object) = 0; | 5039 virtual bool SkipObject(HeapObject* object) = 0; |
4988 }; | 5040 }; |
4989 | 5041 |
4990 | 5042 |
4991 class FreeListNodesFilter : public HeapObjectsFilter { | |
4992 public: | |
4993 FreeListNodesFilter() { | |
4994 MarkFreeListNodes(); | |
4995 } | |
4996 | |
4997 bool SkipObject(HeapObject* object) { | |
4998 if (IntrusiveMarking::IsMarked(object)) { | |
4999 IntrusiveMarking::ClearMark(object); | |
5000 return true; | |
5001 } else { | |
5002 return false; | |
5003 } | |
5004 } | |
5005 | |
5006 private: | |
5007 void MarkFreeListNodes() { | |
5008 Heap::old_pointer_space()->MarkFreeListNodes(); | |
5009 Heap::old_data_space()->MarkFreeListNodes(); | |
5010 MarkCodeSpaceFreeListNodes(); | |
5011 Heap::map_space()->MarkFreeListNodes(); | |
5012 Heap::cell_space()->MarkFreeListNodes(); | |
5013 } | |
5014 | |
5015 void MarkCodeSpaceFreeListNodes() { | |
5016 // For code space, using FreeListNode::IsFreeListNode is OK. | |
5017 HeapObjectIterator iter(Heap::code_space()); | |
5018 for (HeapObject* obj = iter.next_object(); | |
5019 obj != NULL; | |
5020 obj = iter.next_object()) { | |
5021 if (FreeListNode::IsFreeListNode(obj)) { | |
5022 IntrusiveMarking::SetMark(obj); | |
5023 } | |
5024 } | |
5025 } | |
5026 | |
5027 AssertNoAllocation no_alloc; | |
5028 }; | |
5029 | |
5030 | |
5031 class UnreachableObjectsFilter : public HeapObjectsFilter { | 5043 class UnreachableObjectsFilter : public HeapObjectsFilter { |
5032 public: | 5044 public: |
5033 UnreachableObjectsFilter() { | 5045 UnreachableObjectsFilter() { |
5034 MarkUnreachableObjects(); | 5046 MarkUnreachableObjects(); |
5035 } | 5047 } |
5036 | 5048 |
5037 bool SkipObject(HeapObject* object) { | 5049 bool SkipObject(HeapObject* object) { |
5038 if (IntrusiveMarking::IsMarked(object)) { | 5050 if (IntrusiveMarking::IsMarked(object)) { |
5039 IntrusiveMarking::ClearMark(object); | 5051 IntrusiveMarking::ClearMark(object); |
5040 return true; | 5052 return true; |
(...skipping 24 matching lines...) | |
5065 HeapObject* obj = list_.RemoveLast(); | 5077 HeapObject* obj = list_.RemoveLast(); |
5066 obj->Iterate(this); | 5078 obj->Iterate(this); |
5067 } | 5079 } |
5068 | 5080 |
5069 private: | 5081 private: |
5070 List<HeapObject*> list_; | 5082 List<HeapObject*> list_; |
5071 }; | 5083 }; |
5072 | 5084 |
5073 void MarkUnreachableObjects() { | 5085 void MarkUnreachableObjects() { |
5074 HeapIterator iterator; | 5086 HeapIterator iterator; |
5075 for (HeapObject* obj = iterator.next(); | 5087 for (HeapObject* obj = iterator.Next(); |
5076 obj != NULL; | 5088 obj != NULL; |
5077 obj = iterator.next()) { | 5089 obj = iterator.Next()) { |
5078 IntrusiveMarking::SetMark(obj); | 5090 IntrusiveMarking::SetMark(obj); |
5079 } | 5091 } |
5080 UnmarkingVisitor visitor; | 5092 UnmarkingVisitor visitor; |
5081 Heap::IterateRoots(&visitor, VISIT_ALL); | 5093 Heap::IterateRoots(&visitor, VISIT_ALL); |
5082 while (visitor.can_process()) | 5094 while (visitor.can_process()) |
5083 visitor.ProcessNext(); | 5095 visitor.ProcessNext(); |
5084 } | 5096 } |
5085 | 5097 |
5086 AssertNoAllocation no_alloc; | 5098 AssertNoAllocation no_alloc; |
5087 }; | 5099 }; |
5088 | 5100 |
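UnreachableObjectsFilter inverts ordinary marking: mark every object up front, then clear marks transitively from the roots, so whatever stays marked is unreachable and gets skipped. A small sketch over an explicit graph (recursion here instead of V8's explicit worklist):

#include <cstdio>
#include <vector>

struct Node {
  bool marked = false;
  std::vector<Node*> edges;
};

static void ClearReachable(Node* n) {
  if (n == nullptr || !n->marked) return;  // already processed
  n->marked = false;
  for (Node* e : n->edges) ClearReachable(e);
}

int main() {
  Node a, b, c;  // c is unreachable from the root
  a.edges.push_back(&b);
  for (Node* n : {&a, &b, &c}) n->marked = true;  // mark the whole "heap"
  ClearReachable(&a);                             // roots = {a}
  std::printf("a:%d b:%d c:%d (1 = would be skipped)\n",
              a.marked, b.marked, c.marked);      // prints a:0 b:0 c:1
}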
5089 | 5101 |
5090 HeapIterator::HeapIterator() | 5102 HeapIterator::HeapIterator() { |
5091 : filtering_(HeapIterator::kNoFiltering), | |
5092 filter_(NULL) { | |
5093 Init(); | |
5094 } | |
5095 | |
5096 | |
5097 HeapIterator::HeapIterator(HeapIterator::HeapObjectsFiltering filtering) | |
5098 : filtering_(filtering), | |
5099 filter_(NULL) { | |
5100 Init(); | 5103 Init(); |
5101 } | 5104 } |
5102 | 5105 |
5103 | 5106 |
5104 HeapIterator::~HeapIterator() { | 5107 HeapIterator::~HeapIterator() { |
5105 Shutdown(); | 5108 Shutdown(); |
5106 } | 5109 } |
5107 | 5110 |
5108 | 5111 |
5109 void HeapIterator::Init() { | 5112 void HeapIterator::Init() { |
5110 // Start the iteration. | 5113 // Start the iteration. |
5111 space_iterator_ = filtering_ == kNoFiltering ? new SpaceIterator : | 5114 space_iterator_ = new SpaceIterator(); |
5112 new SpaceIterator(IntrusiveMarking::SizeOfMarkedObject); | |
5113 switch (filtering_) { | |
5114 case kFilterFreeListNodes: | |
5115 filter_ = new FreeListNodesFilter; | |
5116 break; | |
5117 case kFilterUnreachable: | |
5118 filter_ = new UnreachableObjectsFilter; | |
5119 break; | |
5120 default: | |
5121 break; | |
5122 } | |
5123 object_iterator_ = space_iterator_->next(); | 5115 object_iterator_ = space_iterator_->next(); |
5124 } | 5116 } |
5125 | 5117 |
5126 | 5118 |
5127 void HeapIterator::Shutdown() { | 5119 void HeapIterator::Shutdown() { |
5128 #ifdef DEBUG | |
5129 // Assert that in filtering mode we have iterated through all | |
5130 // objects. Otherwise, heap will be left in an inconsistent state. | |
5131 if (filtering_ != kNoFiltering) { | |
5132 ASSERT(object_iterator_ == NULL); | |
5133 } | |
5134 #endif | |
5135 // Make sure the last iterator is deallocated. | 5120 // Make sure the last iterator is deallocated. |
5136 delete space_iterator_; | 5121 delete space_iterator_; |
5137 space_iterator_ = NULL; | 5122 space_iterator_ = NULL; |
5138 object_iterator_ = NULL; | 5123 object_iterator_ = NULL; |
5139 delete filter_; | |
5140 filter_ = NULL; | |
5141 } | 5124 } |
5142 | 5125 |
5143 | 5126 |
5144 HeapObject* HeapIterator::next() { | 5127 HeapObject* HeapIterator::Next() { |
5145 if (filter_ == NULL) return NextObject(); | |
5146 | |
5147 HeapObject* obj = NextObject(); | |
5148 while (obj != NULL && filter_->SkipObject(obj)) obj = NextObject(); | |
5149 return obj; | |
5150 } | |
5151 | |
5152 | |
5153 HeapObject* HeapIterator::NextObject() { | |
5154 // No iterator means we are done. | 5128 // No iterator means we are done. |
5155 if (object_iterator_ == NULL) return NULL; | 5129 if (object_iterator_ == NULL) return NULL; |
5156 | 5130 |
5157 if (HeapObject* obj = object_iterator_->next_object()) { | 5131 if (HeapObject* obj = object_iterator_->next_object()) { |
5158 // If the current iterator has more objects we are fine. | 5132 // If the current iterator has more objects we are fine. |
5159 return obj; | 5133 return obj; |
5160 } else { | 5134 } else { |
5161 // Go though the spaces looking for one that has objects. | 5135 // Go though the spaces looking for one that has objects. |
5162 while (space_iterator_->has_next()) { | 5136 while (space_iterator_->has_next()) { |
5163 object_iterator_ = space_iterator_->next(); | 5137 object_iterator_ = space_iterator_->next(); |
5164 if (HeapObject* obj = object_iterator_->next_object()) { | 5138 if (HeapObject* obj = object_iterator_->next_object()) { |
5165 return obj; | 5139 return obj; |
5166 } | 5140 } |
5167 } | 5141 } |
5168 } | 5142 } |
5169 // Done with the last space. | 5143 // Done with the last space. |
5170 object_iterator_ = NULL; | 5144 object_iterator_ = NULL; |
5171 return NULL; | 5145 return NULL; |
5172 } | 5146 } |
5173 | 5147 |
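Next() above is a two-level cursor: exhaust the current space's object iterator, then advance the space iterator until a non-empty space turns up. A stand-alone model of exactly that control flow (containers stand in for spaces and objects):

#include <cstddef>
#include <cstdio>
#include <vector>

static std::vector<std::vector<int>> spaces = {{1, 2}, {}, {3}};
static size_t space_index = 0;
static size_t object_index = 0;

static const int* Next() {
  while (space_index < spaces.size()) {
    if (object_index < spaces[space_index].size()) {
      return &spaces[space_index][object_index++];  // current space has more
    }
    space_index++;     // go through the spaces looking
    object_index = 0;  // for one that has objects
  }
  return nullptr;      // done with the last space
}

int main() {
  for (const int* obj = Next(); obj != nullptr; obj = Next())
    std::printf("%d\n", *obj);  // prints 1 2 3, skipping the empty space
}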
5174 | 5148 |
5175 void HeapIterator::reset() { | 5149 void HeapIterator::Reset() { |
5176 // Restart the iterator. | 5150 // Restart the iterator. |
5177 Shutdown(); | 5151 Shutdown(); |
5178 Init(); | 5152 Init(); |
5179 } | 5153 } |
5180 | 5154 |
5181 | 5155 |
5182 #ifdef DEBUG | 5156 #ifdef DEBUG |
5183 | 5157 |
5184 static bool search_for_any_global; | 5158 static bool search_for_any_global; |
5185 static Object* search_target; | 5159 static Object* search_target; |
(...skipping 157 matching lines...) | |
5343 } | 5317 } |
5344 #endif | 5318 #endif |
5345 | 5319 |
5346 | 5320 |
5347 static intptr_t CountTotalHolesSize() { | 5321 static intptr_t CountTotalHolesSize() { |
5348 intptr_t holes_size = 0; | 5322 intptr_t holes_size = 0; |
5349 OldSpaces spaces; | 5323 OldSpaces spaces; |
5350 for (OldSpace* space = spaces.next(); | 5324 for (OldSpace* space = spaces.next(); |
5351 space != NULL; | 5325 space != NULL; |
5352 space = spaces.next()) { | 5326 space = spaces.next()) { |
5353 holes_size += space->Waste() + space->AvailableFree(); | 5327 holes_size += space->Waste() + space->Available(); |
5354 } | 5328 } |
5355 return holes_size; | 5329 return holes_size; |
5356 } | 5330 } |
5357 | 5331 |
5358 | 5332 |
5359 GCTracer::GCTracer() | 5333 GCTracer::GCTracer() |
5360 : start_time_(0.0), | 5334 : start_time_(0.0), |
5361 start_size_(0), | 5335 start_size_(0), |
5362 gc_count_(0), | 5336 gc_count_(0), |
5363 full_gc_count_(0), | 5337 full_gc_count_(0), |
(...skipping 220 matching lines...) | |
5584 void ExternalStringTable::TearDown() { | 5558 void ExternalStringTable::TearDown() { |
5585 new_space_strings_.Free(); | 5559 new_space_strings_.Free(); |
5586 old_space_strings_.Free(); | 5560 old_space_strings_.Free(); |
5587 } | 5561 } |
5588 | 5562 |
5589 | 5563 |
5590 List<Object*> ExternalStringTable::new_space_strings_; | 5564 List<Object*> ExternalStringTable::new_space_strings_; |
5591 List<Object*> ExternalStringTable::old_space_strings_; | 5565 List<Object*> ExternalStringTable::old_space_strings_; |
5592 | 5566 |
5593 } } // namespace v8::internal | 5567 } } // namespace v8::internal |