Chromium Code Reviews

Diff: src/heap/heap.cc

Issue 2801073006: Decouple root visitors from object visitors. (Closed)
Patch Set: rebase | Created 3 years, 8 months ago
@@ -1 +1 @@
 // Copyright 2012 the V8 project authors. All rights reserved.
 // Use of this source code is governed by a BSD-style license that can be
 // found in the LICENSE file.

 #include "src/heap/heap.h"

 #include "src/accessors.h"
 #include "src/api.h"
 #include "src/assembler-inl.h"
 #include "src/ast/context-slot-cache.h"
(...skipping 1694 matching lines...)
@@ -1705 +1705 @@
   // objects lie between a 'front' mark and a 'rear' mark that is
   // updated as a side effect of promoting an object.
   //
   // There is guaranteed to be enough room at the top of the to space
   // for the addresses of promoted objects: every object promoted
   // frees up its size in bytes from the top of the new space, and
   // objects are at least one pointer in size.
   Address new_space_front = new_space_->ToSpaceStart();
   promotion_queue_.Initialize();

-  ScavengeVisitor scavenge_visitor(this);
+  RootScavengeVisitor root_scavenge_visitor(this);

   isolate()->global_handles()->IdentifyWeakUnmodifiedObjects(
       &IsUnmodifiedHeapObject);

   {
     // Copy roots.
     TRACE_GC(tracer(), GCTracer::Scope::SCAVENGER_ROOTS);
-    IterateRoots(&scavenge_visitor, VISIT_ALL_IN_SCAVENGE);
+    IterateRoots(&root_scavenge_visitor, VISIT_ALL_IN_SCAVENGE);
   }

   {
     // Copy objects reachable from the old generation.
     TRACE_GC(tracer(), GCTracer::Scope::SCAVENGER_OLD_TO_NEW_POINTERS);
     RememberedSet<OLD_TO_NEW>::Iterate(this, [this](Address addr) {
       return Scavenger::CheckAndScavengeObject(this, addr);
     });

     RememberedSet<OLD_TO_NEW>::IterateTyped(
         this, [this](SlotType type, Address host_addr, Address addr) {
           return UpdateTypedSlotHelper::UpdateTypedSlot(
               isolate(), type, addr, [this](Object** addr) {
                 // We expect that objects referenced by code are long living.
                 // If we do not force promotion, then we need to clear
                 // old_to_new slots in dead code objects after mark-compact.
                 return Scavenger::CheckAndScavengeObject(
                     this, reinterpret_cast<Address>(addr));
               });
         });
   }

   {
     TRACE_GC(tracer(), GCTracer::Scope::SCAVENGER_WEAK);
-    // Copy objects reachable from the encountered weak collections list.
-    scavenge_visitor.VisitPointer(&encountered_weak_collections_);
+    IterateEncounteredWeakCollections(&root_scavenge_visitor);
   }

   {
     // Copy objects reachable from the code flushing candidates list.
     TRACE_GC(tracer(), GCTracer::Scope::SCAVENGER_CODE_FLUSH_CANDIDATES);
     MarkCompactCollector* collector = mark_compact_collector();
     if (collector->is_code_flushing_enabled()) {
-      collector->code_flusher()->IteratePointersToFromSpace(&scavenge_visitor);
+      collector->code_flusher()->IteratePointersToFromSpace(
+          &root_scavenge_visitor);
     }
   }

   {
     TRACE_GC(tracer(), GCTracer::Scope::SCAVENGER_SEMISPACE);
-    new_space_front = DoScavenge(&scavenge_visitor, new_space_front);
+    new_space_front = DoScavenge(new_space_front);
   }

   isolate()->global_handles()->MarkNewSpaceWeakUnmodifiedObjectsPending(
       &IsUnscavengedHeapObject);

   isolate()
       ->global_handles()
       ->IterateNewSpaceWeakUnmodifiedRoots<
-          GlobalHandles::HANDLE_PHANTOM_NODES_VISIT_OTHERS>(&scavenge_visitor);
-  new_space_front = DoScavenge(&scavenge_visitor, new_space_front);
+          GlobalHandles::HANDLE_PHANTOM_NODES_VISIT_OTHERS>(
+          &root_scavenge_visitor);
+  new_space_front = DoScavenge(new_space_front);

   UpdateNewSpaceReferencesInExternalStringTable(
       &UpdateNewSpaceReferenceInExternalStringTableEntry);

   promotion_queue_.Destroy();

   incremental_marking()->UpdateMarkingDequeAfterScavenge();

   ScavengeWeakObjectRetainer weak_object_retainer(this);
   ProcessYoungWeakReferences(&weak_object_retainer);
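This hunk replaces the single ScavengeVisitor with a dedicated RootScavengeVisitor wherever only root slots are walked, and DoScavenge no longer takes a visitor at all. The rest of the patch codes against a RootVisitor base class whose declaration is not in this file; the sketch below reconstructs its likely shape from the calls visible in this diff (VisitRootPointer, VisitRootPointers, Synchronize, and the Root tag). It is an inference, not code copied from the CL, and the actual declaration may differ.

    // Hedged reconstruction, not copied from the CL: the RootVisitor
    // interface as this file appears to use it. Object, Root and
    // VisitorSynchronization are the existing V8-internal types.
    class RootVisitor {
     public:
      virtual ~RootVisitor() {}

      // Visit a single root slot, tagged with the root list it came from.
      // Assumed to default to the range version, since several visitors in
      // this patch override only VisitRootPointers.
      virtual void VisitRootPointer(Root root, Object** p) {
        VisitRootPointers(root, p, p + 1);
      }

      // Visit the root slots in [start, end).
      virtual void VisitRootPointers(Root root, Object** start,
                                     Object** end) = 0;

      // Checkpoint used by the serializer/deserializer; a no-op for most
      // visitors.
      virtual void Synchronize(VisitorSynchronization::SyncTag tag) {}
    };

Root tags seen later in this diff include Root::kStringTable, Root::kSmiRootList, Root::kWeakCollections, Root::kStrongRootList and Root::kStrongRoots, so Root is presumably an enum with at least those values.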
(...skipping 167 matching lines...)
@@ -1951 +1952 @@
           old_generation_survival_rate);
     }
   }
 }


 void Heap::VisitExternalResources(v8::ExternalResourceVisitor* visitor) {
   DisallowHeapAllocation no_allocation;
   // All external strings are listed in the external string table.

-  class ExternalStringTableVisitorAdapter : public ObjectVisitor {
+  class ExternalStringTableVisitorAdapter : public RootVisitor {
    public:
     explicit ExternalStringTableVisitorAdapter(
         v8::ExternalResourceVisitor* visitor)
         : visitor_(visitor) {}
-    virtual void VisitPointers(Object** start, Object** end) {
+    virtual void VisitRootPointers(Root root, Object** start, Object** end) {
       for (Object** p = start; p < end; p++) {
         DCHECK((*p)->IsExternalString());
         visitor_->VisitExternalString(
             Utils::ToLocal(Handle<String>(String::cast(*p))));
       }
     }

    private:
     v8::ExternalResourceVisitor* visitor_;
   } external_string_table_visitor(visitor);

   external_string_table_.IterateAll(&external_string_table_visitor);
 }

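The adapter above exists so the public v8::ExternalResourceVisitor API stays unchanged while the internal walk moves to RootVisitor. For context, a hedged embedder-side sketch of that public API in use; the class is hypothetical and the Isolate::VisitExternalResources entry point is assumed from the public header, it is not part of this diff.

    // Hypothetical embedder code, not part of this CL.
    #include "v8.h"

    class ExternalStringTally : public v8::ExternalResourceVisitor {
     public:
      // Called once per external string found in the external string table.
      void VisitExternalString(v8::Local<v8::String> string) override {
        ++count_;
        characters_ += string->Length();
      }
      int count() const { return count_; }
      int characters() const { return characters_; }

     private:
      int count_ = 0;
      int characters_ = 0;
    };

    // Possible usage (entry point assumed from the public API):
    //   ExternalStringTally tally;
    //   isolate->VisitExternalResources(&tally);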
-Address Heap::DoScavenge(ObjectVisitor* scavenge_visitor,
-                         Address new_space_front) {
+Address Heap::DoScavenge(Address new_space_front) {
   do {
     SemiSpace::AssertValidRange(new_space_front, new_space_->top());
     // The addresses new_space_front and new_space_.top() define a
     // queue of unprocessed copied objects. Process them until the
     // queue is empty.
     while (new_space_front != new_space_->top()) {
       if (!Page::IsAlignedToPageSize(new_space_front)) {
         HeapObject* object = HeapObject::FromAddress(new_space_front);
         new_space_front +=
             StaticScavengeVisitor::IterateBody(object->map(), object);
(...skipping 2772 matching lines...)
@@ -4765 +4765 @@
 #undef INTERNALIZED_STRING
 #define STRING_TYPE(NAME, size, name, Name) case Heap::k##Name##MapRootIndex:
     STRING_TYPE_LIST(STRING_TYPE)
 #undef STRING_TYPE
       return true;
     default:
       return false;
   }
 }

-
 #ifdef VERIFY_HEAP
 void Heap::Verify() {
   CHECK(HasBeenSetUp());
   HandleScope scope(isolate());

   // We have to wait here for the sweeper threads to have an iterable heap.
   mark_compact_collector()->EnsureSweepingCompleted();

   VerifyPointersVisitor visitor;
   IterateRoots(&visitor, VISIT_ONLY_STRONG);
(...skipping 111 matching lines...)
@@ -4897 +4896 @@
   // TODO(hpayer): Implement a special promotion visitor that incorporates
   // regular visiting and IteratePromotedObjectPointers.
   if (!was_marked_black) {
     if (incremental_marking()->black_allocation()) {
       IncrementalMarking::MarkGrey(this, target->map());
       incremental_marking()->IterateBlackObject(target);
     }
   }
 }

-
-void Heap::IterateRoots(ObjectVisitor* v, VisitMode mode) {
+void Heap::IterateRoots(RootVisitor* v, VisitMode mode) {
   IterateStrongRoots(v, mode);
   IterateWeakRoots(v, mode);
 }

-
-void Heap::IterateWeakRoots(ObjectVisitor* v, VisitMode mode) {
-  v->VisitPointer(reinterpret_cast<Object**>(&roots_[kStringTableRootIndex]));
+void Heap::IterateWeakRoots(RootVisitor* v, VisitMode mode) {
+  v->VisitRootPointer(Root::kStringTable, reinterpret_cast<Object**>(
+                                              &roots_[kStringTableRootIndex]));
   v->Synchronize(VisitorSynchronization::kStringTable);
   if (mode != VISIT_ALL_IN_SCAVENGE && mode != VISIT_ALL_IN_SWEEP_NEWSPACE) {
     // Scavenge collections have special processing for this.
     external_string_table_.IterateAll(v);
   }
   v->Synchronize(VisitorSynchronization::kExternalStringsTable);
 }

-
-void Heap::IterateSmiRoots(ObjectVisitor* v) {
+void Heap::IterateSmiRoots(RootVisitor* v) {
   // Acquire execution access since we are going to read stack limit values.
   ExecutionAccess access(isolate());
-  v->VisitPointers(&roots_[kSmiRootsStart], &roots_[kRootListLength]);
+  v->VisitRootPointers(Root::kSmiRootList, &roots_[kSmiRootsStart],
+                       &roots_[kRootListLength]);
   v->Synchronize(VisitorSynchronization::kSmiRootList);
 }

+void Heap::IterateEncounteredWeakCollections(RootVisitor* visitor) {
+  visitor->VisitRootPointer(Root::kWeakCollections,
+                            &encountered_weak_collections_);
+}
+
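With a Root tag now passed to every callback, a visitor can tell which root list a slot came from, something the plain ObjectVisitor interface could not express. A hypothetical diagnostic visitor (not part of the patch, names invented here) illustrates what that enables, written against the RootVisitor usage shown above:

    // Hypothetical diagnostic visitor; assumes <map> is available and that
    // Root is an enum usable as a map key.
    class RootSlotCounter : public RootVisitor {
     public:
      void VisitRootPointers(Root root, Object** start, Object** end) override {
        // Attribute every slot in the range to the root list it came from.
        counts_[root] += static_cast<size_t>(end - start);
      }

      void Report() const {
        for (const auto& entry : counts_) {
          PrintF("root list %d: %zu slots\n", static_cast<int>(entry.first),
                 entry.second);
        }
      }

     private:
      std::map<Root, size_t> counts_;
    };

    // Possible usage, mirroring the IterateRoots calls elsewhere in heap.cc:
    //   RootSlotCounter counter;
    //   heap->IterateRoots(&counter, VISIT_ALL);
    //   counter.Report();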
 // We cannot avoid stale handles to left-trimmed objects, but can only make
 // sure all handles still needed are updated. Filter out a stale pointer
 // and clear the slot to allow post processing of handles (needed because
 // the sweeper might actually free the underlying page).
-class FixStaleLeftTrimmedHandlesVisitor : public ObjectVisitor {
+class FixStaleLeftTrimmedHandlesVisitor : public RootVisitor {
  public:
   explicit FixStaleLeftTrimmedHandlesVisitor(Heap* heap) : heap_(heap) {
     USE(heap_);
   }

-  void VisitPointer(Object** p) override { FixHandle(p); }
+  void VisitRootPointer(Root root, Object** p) override { FixHandle(p); }

-  void VisitPointers(Object** start, Object** end) override {
+  void VisitRootPointers(Root root, Object** start, Object** end) override {
     for (Object** p = start; p < end; p++) FixHandle(p);
   }

  private:
   inline void FixHandle(Object** p) {
     HeapObject* current = reinterpret_cast<HeapObject*>(*p);
     if (!current->IsHeapObject()) return;
     const MapWord map_word = current->map_word();
     if (!map_word.IsForwardingAddress() && current->IsFiller()) {
 #ifdef DEBUG
(...skipping 11 matching lines...)
@@ -4966 +4969 @@
       }
       DCHECK(current->IsFixedArrayBase());
 #endif  // DEBUG
       *p = nullptr;
     }
   }

   Heap* heap_;
 };

-void Heap::IterateStrongRoots(ObjectVisitor* v, VisitMode mode) {
-  v->VisitPointers(&roots_[0], &roots_[kStrongRootListLength]);
+void Heap::IterateStrongRoots(RootVisitor* v, VisitMode mode) {
+  v->VisitRootPointers(Root::kStrongRootList, &roots_[0],
+                       &roots_[kStrongRootListLength]);
   v->Synchronize(VisitorSynchronization::kStrongRootList);
   // The serializer/deserializer iterates the root list twice, first to pick
   // off immortal immovable roots to make sure they end up on the first page,
   // and then again for the rest.
   if (mode == VISIT_ONLY_STRONG_ROOT_LIST) return;

   isolate_->bootstrapper()->Iterate(v);
   v->Synchronize(VisitorSynchronization::kBootstrapper);
   isolate_->Iterate(v);
   v->Synchronize(VisitorSynchronization::kTop);
(...skipping 49 matching lines...)
@@ -5037 +5041 @@
     isolate_->eternal_handles()->IterateAllRoots(v);
   }
   v->Synchronize(VisitorSynchronization::kEternalHandles);

   // Iterate over pointers being held by inactive threads.
   isolate_->thread_manager()->Iterate(v);
   v->Synchronize(VisitorSynchronization::kThreadManager);

   // Iterate over other strong roots (currently only identity maps).
   for (StrongRootsList* list = strong_roots_list_; list; list = list->next) {
-    v->VisitPointers(list->start, list->end);
+    v->VisitRootPointers(Root::kStrongRoots, list->start, list->end);
   }
   v->Synchronize(VisitorSynchronization::kStrongRoots);

   // Iterate over the partial snapshot cache unless serializing.
   if (mode != VISIT_ONLY_STRONG_FOR_SERIALIZATION) {
     SerializerDeserializer::Iterate(isolate_, v);
   }
   // We don't do a v->Synchronize call here, because in debug mode that will
   // output a flag to the snapshot. However at this point the serializer and
   // deserializer are deliberately a little unsynchronized (see above) so the
(...skipping 872 matching lines...)
@@ -5930 +5934 @@
   }
   if (new_length != length) retained_maps->SetLength(new_length);
 }

 void Heap::FatalProcessOutOfMemory(const char* location, bool is_heap_oom) {
   v8::internal::V8::FatalProcessOutOfMemory(location, is_heap_oom);
 }

 #ifdef DEBUG

-class PrintHandleVisitor : public ObjectVisitor {
+class PrintHandleVisitor : public RootVisitor {
  public:
-  void VisitPointers(Object** start, Object** end) override {
+  void VisitRootPointers(Root root, Object** start, Object** end) override {
     for (Object** p = start; p < end; p++)
       PrintF("  handle %p to %p\n", reinterpret_cast<void*>(p),
              reinterpret_cast<void*>(*p));
   }
 };


 void Heap::PrintHandles() {
   PrintF("Handles:\n");
   PrintHandleVisitor v;
   isolate_->handle_scope_implementer()->Iterate(&v);
 }

 #endif

-class CheckHandleCountVisitor : public ObjectVisitor {
+class CheckHandleCountVisitor : public RootVisitor {
  public:
   CheckHandleCountVisitor() : handle_count_(0) {}
   ~CheckHandleCountVisitor() override {
     CHECK(handle_count_ < HandleScope::kCheckHandleThreshold);
   }
-  void VisitPointers(Object** start, Object** end) override {
+  void VisitRootPointers(Root root, Object** start, Object** end) override {
     handle_count_ += end - start;
   }

  private:
   ptrdiff_t handle_count_;
 };


 void Heap::CheckHandleCount() {
   CheckHandleCountVisitor v;
(...skipping 135 matching lines...)
@@ -6110 +6114 @@
   ~UnreachableObjectsFilter() {
     heap_->mark_compact_collector()->ClearMarkbits();
   }

   bool SkipObject(HeapObject* object) {
     if (object->IsFiller()) return true;
     return ObjectMarking::IsWhite(object, MarkingState::Internal(object));
   }

  private:
-  class MarkingVisitor : public ObjectVisitor {
+  class MarkingVisitor : public ObjectVisitor, public RootVisitor {
    public:
     MarkingVisitor() : marking_stack_(10) {}

     void VisitPointers(Object** start, Object** end) override {
+      MarkPointers(start, end);
+    }
+
+    void VisitRootPointers(Root root, Object** start, Object** end) override {
+      MarkPointers(start, end);
+    }
+
+    void TransitiveClosure() {
+      while (!marking_stack_.is_empty()) {
+        HeapObject* obj = marking_stack_.RemoveLast();
+        obj->Iterate(this);
+      }
+    }
+
+   private:
+    void MarkPointers(Object** start, Object** end) {
       for (Object** p = start; p < end; p++) {
         if (!(*p)->IsHeapObject()) continue;
         HeapObject* obj = HeapObject::cast(*p);
         // Use Marking instead of ObjectMarking to avoid adjusting live bytes
         // counter.
         MarkBit mark_bit =
             ObjectMarking::MarkBitFrom(obj, MarkingState::Internal(obj));
         if (Marking::IsWhite(mark_bit)) {
           Marking::WhiteToBlack(mark_bit);
           marking_stack_.Add(obj);
         }
       }
     }
-
-    void TransitiveClosure() {
-      while (!marking_stack_.is_empty()) {
-        HeapObject* obj = marking_stack_.RemoveLast();
-        obj->Iterate(this);
-      }
-    }
-
-   private:
     List<HeapObject*> marking_stack_;
   };

   void MarkReachableObjects() {
     MarkingVisitor visitor;
     heap_->IterateRoots(&visitor, VISIT_ALL);
     visitor.TransitiveClosure();
   }

   Heap* heap_;
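The MarkingVisitor hunk above shows the pattern used when one traversal must cover both root slots and object bodies: Heap::IterateRoots now takes a RootVisitor while HeapObject::Iterate still takes an ObjectVisitor, so the class derives from both and funnels both entry points into one helper. Below is a compact, hedged restatement of that reachability walk, simplified with a std::vector worklist and an explicit visited set instead of mark bits; it assumes <vector> and <unordered_set> and is not the actual class from this CL.

    // Hypothetical sketch of the dual-role visitor pattern; not the CL's code.
    class ReachabilityVisitor : public ObjectVisitor, public RootVisitor {
     public:
      // Slots inside heap objects (reached via obj->Iterate(this)).
      void VisitPointers(Object** start, Object** end) override {
        Enqueue(start, end);
      }
      // Slots in root lists, handles, stacks, and so on.
      void VisitRootPointers(Root root, Object** start, Object** end) override {
        Enqueue(start, end);
      }
      // Process the worklist to a fixed point.
      void TransitiveClosure() {
        while (!worklist_.empty()) {
          HeapObject* obj = worklist_.back();
          worklist_.pop_back();
          obj->Iterate(this);  // feeds VisitPointers above
        }
      }

     private:
      void Enqueue(Object** start, Object** end) {
        for (Object** p = start; p < end; p++) {
          if (!(*p)->IsHeapObject()) continue;
          HeapObject* obj = HeapObject::cast(*p);
          // The real MarkingVisitor uses mark bits here; a set keeps the
          // sketch from revisiting objects (and from looping on cycles).
          if (visited_.insert(obj).second) worklist_.push_back(obj);
        }
      }
      std::vector<HeapObject*> worklist_;
      std::unordered_set<HeapObject*> visited_;
    };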
(...skipping 247 matching lines...)
@@ -6404 +6415 @@
 }


 // static
 int Heap::GetStaticVisitorIdForMap(Map* map) {
   return StaticVisitorBase::GetVisitorId(map);
 }

 }  // namespace internal
 }  // namespace v8
