OLD | NEW |
---|---|
1 // Copyright 2012 the V8 project authors. All rights reserved. | 1 // Copyright 2012 the V8 project authors. All rights reserved. |
2 // Use of this source code is governed by a BSD-style license that can be | 2 // Use of this source code is governed by a BSD-style license that can be |
3 // found in the LICENSE file. | 3 // found in the LICENSE file. |
4 | 4 |
5 #ifndef V8_HEAP_HEAP_H_ | 5 #ifndef V8_HEAP_HEAP_H_ |
6 #define V8_HEAP_HEAP_H_ | 6 #define V8_HEAP_HEAP_H_ |
7 | 7 |
8 #include <cmath> | 8 #include <cmath> |
9 #include <map> | 9 #include <map> |
10 | 10 |
(...skipping 559 matching lines...) | |
570 | 570 |
571 | 571 |
572 enum ArrayStorageAllocationMode { | 572 enum ArrayStorageAllocationMode { |
573 DONT_INITIALIZE_ARRAY_ELEMENTS, | 573 DONT_INITIALIZE_ARRAY_ELEMENTS, |
574 INITIALIZE_ARRAY_ELEMENTS_WITH_HOLE | 574 INITIALIZE_ARRAY_ELEMENTS_WITH_HOLE |
575 }; | 575 }; |
576 | 576 |
577 | 577 |
578 class Heap { | 578 class Heap { |
579 public: | 579 public: |
580 // Declare all the root indices. This defines the root list order. | |
581 enum RootListIndex { | |
582 #define ROOT_INDEX_DECLARATION(type, name, camel_name) k##camel_name##RootIndex, | |
583 STRONG_ROOT_LIST(ROOT_INDEX_DECLARATION) | |
584 #undef ROOT_INDEX_DECLARATION | |
585 | |
586 #define STRING_INDEX_DECLARATION(name, str) k##name##RootIndex, | |
587 INTERNALIZED_STRING_LIST(STRING_INDEX_DECLARATION) | |
588 #undef STRING_INDEX_DECLARATION | |
589 | |
590 #define SYMBOL_INDEX_DECLARATION(name) k##name##RootIndex, | |
591 PRIVATE_SYMBOL_LIST(SYMBOL_INDEX_DECLARATION) | |
592 #undef SYMBOL_INDEX_DECLARATION | |
593 | |
594 #define SYMBOL_INDEX_DECLARATION(name, varname, description) k##name##RootIndex, | |
595 PUBLIC_SYMBOL_LIST(SYMBOL_INDEX_DECLARATION) | |
596 #undef SYMBOL_INDEX_DECLARATION | |
597 | |
598 // Utility type maps | |
599 #define DECLARE_STRUCT_MAP(NAME, Name, name) k##Name##MapRootIndex, | |
600 STRUCT_LIST(DECLARE_STRUCT_MAP) | |
601 #undef DECLARE_STRUCT_MAP | |
602 kStringTableRootIndex, | |
603 | |
604 #define ROOT_INDEX_DECLARATION(type, name, camel_name) k##camel_name##RootIndex, | |
605 SMI_ROOT_LIST(ROOT_INDEX_DECLARATION) | |
606 #undef ROOT_INDEX_DECLARATION | |
607 kRootListLength, | |
608 kStrongRootListLength = kStringTableRootIndex, | |
609 kSmiRootsStart = kStringTableRootIndex + 1 | |
610 }; | |
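Note on the X-macro pattern used by this enum: each *_LIST macro invokes the declaration macro once per entry and pastes an enumerator name, so the enum order is exactly the list order. A minimal self-contained sketch of the same mechanism; the DEMO_* names and list entries are invented for illustration and are not part of this CL:

    // Stand-in for STRONG_ROOT_LIST; the real lists are defined elsewhere in this header.
    #define DEMO_ROOT_LIST(V)                     \
      V(Oddball, undefined_value, UndefinedValue) \
      V(Oddball, null_value, NullValue)

    enum DemoRootListIndex {
    #define DEMO_INDEX_DECLARATION(type, name, camel_name) k##camel_name##RootIndex,
      DEMO_ROOT_LIST(DEMO_INDEX_DECLARATION)
    #undef DEMO_INDEX_DECLARATION
      kDemoRootListLength  // expands to kUndefinedValueRootIndex, kNullValueRootIndex, length 2
    };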
611 | |
612 // Indicates whether live bytes adjustment is triggered | |
613 // - from within the GC code before sweeping started (SEQUENTIAL_TO_SWEEPER), | |
614 // - or from within GC code or from mutator code while sweeping is in | |
615 //   progress concurrently (CONCURRENT_TO_SWEEPER). | |
616 enum InvocationMode { SEQUENTIAL_TO_SWEEPER, CONCURRENT_TO_SWEEPER }; | |
617 | |
618 enum ScratchpadSlotMode { IGNORE_SCRATCHPAD_SLOT, RECORD_SCRATCHPAD_SLOT }; | |
619 | |
620 enum HeapState { NOT_IN_GC, SCAVENGE, MARK_COMPACT }; | |
621 | |
622 // ObjectStats are kept in two arrays, counts and sizes. Related stats are | |
623 // stored in a contiguous linear buffer. Stats groups are stored one after | |
624 // another. | |
625 enum { | |
626 FIRST_CODE_KIND_SUB_TYPE = LAST_TYPE + 1, | |
627 FIRST_FIXED_ARRAY_SUB_TYPE = | |
628 FIRST_CODE_KIND_SUB_TYPE + Code::NUMBER_OF_KINDS, | |
629 FIRST_CODE_AGE_SUB_TYPE = | |
630 FIRST_FIXED_ARRAY_SUB_TYPE + LAST_FIXED_ARRAY_SUB_TYPE + 1, | |
631 OBJECT_STATS_COUNT = FIRST_CODE_AGE_SUB_TYPE + Code::kCodeAgeCount + 1 | |
632 }; | |
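The grouping above means a single pair of flat arrays (object_counts_/object_sizes_, declared later in this file) holds every stat; each FIRST_* constant is the start offset of one group. A small hedged sketch of the resulting indexing, written as free functions purely for illustration (the real indexing is in RecordCodeSubTypeStats further down):

    // Groups, in order: instance types [0..LAST_TYPE], code kinds, fixed-array
    // sub types, code ages; OBJECT_STATS_COUNT is the total buffer length.
    int CodeKindSlot(int code_kind) {
      return Heap::FIRST_CODE_KIND_SUB_TYPE + code_kind;
    }
    int CodeAgeSlot(int code_age) {
      // Ages are biased by Code::kFirstCodeAge, as RecordCodeSubTypeStats does.
      return Heap::FIRST_CODE_AGE_SUB_TYPE + code_age - Code::kFirstCodeAge;
    }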
633 | |
634 // Taking this lock prevents the GC from entering a phase that relocates | |
635 // object references. | |
636 class RelocationLock { | |
637 public: | |
638 explicit RelocationLock(Heap* heap) : heap_(heap) { | |
639 heap_->relocation_mutex_.Lock(); | |
640 } | |
641 | |
642 ~RelocationLock() { heap_->relocation_mutex_.Unlock(); } | |
643 | |
644 private: | |
645 Heap* heap_; | |
646 }; | |
647 | |
648 // An optional version of the above lock that can be used for some critical | |
649 // sections on the mutator thread; only safe since the GC currently does not | |
650 // do concurrent compaction. | |
651 class OptionalRelocationLock { | |
652 public: | |
653 OptionalRelocationLock(Heap* heap, bool concurrent) | |
654 : heap_(heap), concurrent_(concurrent) { | |
655 if (concurrent_) heap_->relocation_mutex_.Lock(); | |
656 } | |
657 | |
658 ~OptionalRelocationLock() { | |
659 if (concurrent_) heap_->relocation_mutex_.Unlock(); | |
660 } | |
661 | |
662 private: | |
663 Heap* heap_; | |
664 bool concurrent_; | |
665 }; | |
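A brief usage sketch for the two guards above; the caller functions are hypothetical and not part of this CL, but both classes are plain RAII wrappers around relocation_mutex_:

    void ReadRawAddresses(Heap* heap) {
      Heap::RelocationLock guard(heap);  // GC cannot relocate objects while held.
      // ... inspect raw object addresses safely ...
    }                                    // released when guard leaves scope

    void MutatorCriticalSection(Heap* heap, bool concurrent) {
      // Pays the locking cost only when the section can actually race with the
      // GC; safe only because compaction is not concurrent yet (see above).
      Heap::OptionalRelocationLock guard(heap, concurrent);
      // ... critical section on the mutator thread ...
    }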
666 | |
667 // Support for partial snapshots. After calling this we have a linear | |
668 // space to write objects in each space. | |
669 struct Chunk { | |
670 uint32_t size; | |
671 Address start; | |
672 Address end; | |
673 }; | |
674 typedef List<Chunk> Reservation; | |
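A hedged sketch of how the snapshot code might fill in a Reservation (hypothetical caller; the chunk size is invented). One Reservation is kept per space, and ReserveSpace, declared later in this class, fills in the [start, end) range of each chunk on success:

    Heap::Chunk chunk = {64 * KB, NULL, NULL};  // requested size; start/end filled in later
    Heap::Reservation reservation;
    reservation.Add(chunk);
    if (!heap->ReserveSpace(&reservation)) {
      // Not enough room to lay the snapshot objects out linearly.
    }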
675 | |
676 static const intptr_t kMinimumOldGenerationAllocationLimit = | |
677 8 * (Page::kPageSize > MB ? Page::kPageSize : MB); | |
678 | |
679 static const int kInitalOldGenerationLimitFactor = 2; | |
680 | |
681 #if V8_OS_ANDROID | |
682 // Don't apply pointer multiplier on Android since it has no swap space and | |
683 // should instead adapt its heap size based on available physical memory. | |
684 static const int kPointerMultiplier = 1; | |
685 #else | |
686 static const int kPointerMultiplier = i::kPointerSize / 4; | |
687 #endif | |
688 | |
689 // The new space size has to be a power of 2. Sizes are in MB. | |
690 static const int kMaxSemiSpaceSizeLowMemoryDevice = 1 * kPointerMultiplier; | |
691 static const int kMaxSemiSpaceSizeMediumMemoryDevice = 4 * kPointerMultiplier; | |
692 static const int kMaxSemiSpaceSizeHighMemoryDevice = 8 * kPointerMultiplier; | |
693 static const int kMaxSemiSpaceSizeHugeMemoryDevice = 8 * kPointerMultiplier; | |
694 | |
695 // The old space size has to be a multiple of Page::kPageSize. | |
696 // Sizes are in MB. | |
697 static const int kMaxOldSpaceSizeLowMemoryDevice = 128 * kPointerMultiplier; | |
698 static const int kMaxOldSpaceSizeMediumMemoryDevice = | |
699 256 * kPointerMultiplier; | |
700 static const int kMaxOldSpaceSizeHighMemoryDevice = 512 * kPointerMultiplier; | |
701 static const int kMaxOldSpaceSizeHugeMemoryDevice = 700 * kPointerMultiplier; | |
702 | |
703 // The executable size has to be a multiple of Page::kPageSize. | |
704 // Sizes are in MB. | |
705 static const int kMaxExecutableSizeLowMemoryDevice = 96 * kPointerMultiplier; | |
706 static const int kMaxExecutableSizeMediumMemoryDevice = | |
707 192 * kPointerMultiplier; | |
708 static const int kMaxExecutableSizeHighMemoryDevice = | |
709 256 * kPointerMultiplier; | |
710 static const int kMaxExecutableSizeHugeMemoryDevice = | |
711 256 * kPointerMultiplier; | |
712 | |
713 static const int kTraceRingBufferSize = 512; | |
714 static const int kStacktraceBufferSize = 512; | |
715 | |
716 static const double kMinHeapGrowingFactor = 1.1; | |
717 static const double kMaxHeapGrowingFactor = 4.0; | |
718 static const double kMaxHeapGrowingFactorMemoryConstrained = 2.0; | |
719 static const double kMaxHeapGrowingFactorIdle = 1.5; | |
720 static const double kTargetMutatorUtilization = 0.97; | |
721 | |
722 // Sloppy mode arguments object size. | |
723 static const int kSloppyArgumentsObjectSize = | |
724 JSObject::kHeaderSize + 2 * kPointerSize; | |
725 | |
726 // The strict mode arguments object has no callee, so it is smaller. | |
727 static const int kStrictArgumentsObjectSize = | |
728 JSObject::kHeaderSize + 1 * kPointerSize; | |
729 | |
730 // Indices for direct access into argument objects. | |
731 static const int kArgumentsLengthIndex = 0; | |
732 | |
733 // callee is only valid in sloppy mode. | |
734 static const int kArgumentsCalleeIndex = 1; | |
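Spelled out, the sizes and indices above describe a header followed by pointer-sized in-object fields, length first and (in sloppy mode only) callee second; the offsets below are illustrative arithmetic, not constants added by this CL:

    // length lives at index 0, callee (sloppy mode only) at index 1.
    const int kLengthOffset =
        JSObject::kHeaderSize + Heap::kArgumentsLengthIndex * kPointerSize;
    const int kCalleeOffset =
        JSObject::kHeaderSize + Heap::kArgumentsCalleeIndex * kPointerSize;
    // Sloppy:  header + {length, callee}  == kSloppyArgumentsObjectSize
    // Strict:  header + {length}          == kStrictArgumentsObjectSize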
735 | |
736 static const int kNoGCFlags = 0; | |
737 static const int kReduceMemoryFootprintMask = 1; | |
738 static const int kAbortIncrementalMarkingMask = 2; | |
739 static const int kFinalizeIncrementalMarkingMask = 4; | |
740 | |
741 // Making the heap iterable requires us to abort incremental marking. | |
742 static const int kMakeHeapIterableMask = kAbortIncrementalMarkingMask; | |
743 | |
744 // The roots that have an index less than this are always in old space. | |
745 static const int kOldSpaceRoots = 0x20; | |
746 | |
747 STATIC_ASSERT(kUndefinedValueRootIndex == | |
748 Internals::kUndefinedValueRootIndex); | |
749 STATIC_ASSERT(kNullValueRootIndex == Internals::kNullValueRootIndex); | |
750 STATIC_ASSERT(kTrueValueRootIndex == Internals::kTrueValueRootIndex); | |
751 STATIC_ASSERT(kFalseValueRootIndex == Internals::kFalseValueRootIndex); | |
752 STATIC_ASSERT(kempty_stringRootIndex == Internals::kEmptyStringRootIndex); | |
753 | |
754 // Calculates the maximum amount of filler that could be required by the | |
755 // given alignment. | |
756 static int GetMaximumFillToAlign(AllocationAlignment alignment); | |
757 // Calculates the actual amount of filler required for a given address at the | |
758 // given alignment. | |
759 static int GetFillToAlign(Address address, AllocationAlignment alignment); | |
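A short sketch of how these helpers combine with the filler functions declared elsewhere in this class (PrecedeWithFiller/AlignWithFiller); the caller and sizes are illustrative only:

    // Ask for the worst-case padding up front, then trim with a filler so the
    // object itself ends up aligned and the slack stays iterable.
    int RequestedSize(int object_size, AllocationAlignment alignment) {
      return object_size + Heap::GetMaximumFillToAlign(alignment);
    }
    // With the raw allocation in hand:
    //   int pad = Heap::GetFillToAlign(raw_address, alignment);  // 0 if already aligned
    //   object  = heap->AlignWithFiller(raw_object, object_size, requested, alignment);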
760 | |
761 template <typename T> | |
762 static inline bool IsOneByte(T t, int chars); | |
763 | |
764 // Callback function passed to Heap::Iterate etc. Copies an object if | |
765 // necessary; the object might be promoted to an old space. The caller must | |
766 // ensure the precondition that the object is (a) a heap object and (b) in | |
767 // the heap's from space. | |
768 static inline void ScavengePointer(HeapObject** p); | |
769 static inline void ScavengeObject(HeapObject** p, HeapObject* object); | |
770 | |
771 // Slow part of scavenge object. | |
772 static void ScavengeObjectSlow(HeapObject** p, HeapObject* object); | |
773 | |
774 static void FatalProcessOutOfMemory(const char* location, | |
775 bool take_snapshot = false); | |
776 | |
777 static bool RootIsImmortalImmovable(int root_index); | |
778 | |
779 // Checks whether the space is valid. | |
780 static bool IsValidAllocationSpace(AllocationSpace space); | |
781 | |
782 // An object may have an AllocationSite associated with it through a trailing | |
783 // AllocationMemento. Its feedback should be updated when objects are found | |
784 // in the heap. | |
785 static inline void UpdateAllocationSiteFeedback(HeapObject* object, | |
786 ScratchpadSlotMode mode); | |
787 | |
788 // Generated code can embed direct references to non-writable roots if | |
789 // they are in new space. | |
790 static bool RootCanBeWrittenAfterInitialization(RootListIndex root_index); | |
791 | |
792 // Zapping is needed for verify heap, and always done in debug builds. | |
793 static inline bool ShouldZapGarbage() { | |
794 #ifdef DEBUG | |
795 return true; | |
796 #else | |
797 #ifdef VERIFY_HEAP | |
798 return FLAG_verify_heap; | |
799 #else | |
800 return false; | |
801 #endif | |
802 #endif | |
803 } | |
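A hedged usage sketch: callers check this predicate before overwriting freed or trimmed memory with an obviously-bogus pattern so stale references fail fast. The helper and the fill byte below are illustrative, not the values V8 actually uses:

    // Illustrative helper; assumes <cstring> for memset.
    void MaybeZap(Address start, int size_in_bytes) {
      if (Heap::ShouldZapGarbage()) {
        memset(start, 0xCC, size_in_bytes);  // recognizable garbage pattern
      }
    }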
804 | |
805 static double HeapGrowingFactor(double gc_speed, double mutator_speed); | |
806 | |
807 // Copy block of memory from src to dst. Size of block should be aligned | |
808 // by pointer size. | |
809 static inline void CopyBlock(Address dst, Address src, int byte_size); | |
810 | |
811 // Optimized version of memmove for blocks with pointer size aligned sizes and | |
812 // pointer size aligned addresses. | |
813 static inline void MoveBlock(Address dst, Address src, int byte_size); | |
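Both helpers rely on the pointer-size alignment stated in their comments; a minimal sketch of why that matters, assuming the usual DCHECK/IsAligned helpers (this is not the real implementation):

    inline void CopyPointerAlignedBlock(Address dst, Address src, int byte_size) {
      DCHECK(IsAligned(byte_size, kPointerSize));
      // Alignment lets the copy move a whole pointer per iteration instead of
      // byte by byte.
      Object** dst_slot = reinterpret_cast<Object**>(dst);
      Object** src_slot = reinterpret_cast<Object**>(src);
      for (int i = 0; i < byte_size / kPointerSize; i++) dst_slot[i] = src_slot[i];
    }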
814 | |
580 // Configure heap size in MB before setup. Return false if the heap has been | 815 // Configure heap size in MB before setup. Return false if the heap has been |
581 // set up already. | 816 // set up already. |
582 bool ConfigureHeap(int max_semi_space_size, int max_old_space_size, | 817 bool ConfigureHeap(int max_semi_space_size, int max_old_space_size, |
583 int max_executable_size, size_t code_range_size); | 818 int max_executable_size, size_t code_range_size); |
584 bool ConfigureHeapDefault(); | 819 bool ConfigureHeapDefault(); |
585 | 820 |
586 // Prepares the heap, setting up memory areas that are needed in the isolate | 821 // Prepares the heap, setting up memory areas that are needed in the isolate |
587 // without actually creating any objects. | 822 // without actually creating any objects. |
588 bool SetUp(); | 823 bool SetUp(); |
589 | 824 |
(...skipping 71 matching lines...) | |
661 // for all addresses in either semispace. | 896 // for all addresses in either semispace. |
662 Address NewSpaceStart() { return new_space_.start(); } | 897 Address NewSpaceStart() { return new_space_.start(); } |
663 uintptr_t NewSpaceMask() { return new_space_.mask(); } | 898 uintptr_t NewSpaceMask() { return new_space_.mask(); } |
664 Address NewSpaceTop() { return new_space_.top(); } | 899 Address NewSpaceTop() { return new_space_.top(); } |
665 | 900 |
666 NewSpace* new_space() { return &new_space_; } | 901 NewSpace* new_space() { return &new_space_; } |
667 OldSpace* old_space() { return old_space_; } | 902 OldSpace* old_space() { return old_space_; } |
668 OldSpace* code_space() { return code_space_; } | 903 OldSpace* code_space() { return code_space_; } |
669 MapSpace* map_space() { return map_space_; } | 904 MapSpace* map_space() { return map_space_; } |
670 LargeObjectSpace* lo_space() { return lo_space_; } | 905 LargeObjectSpace* lo_space() { return lo_space_; } |
906 | |
671 PagedSpace* paged_space(int idx) { | 907 PagedSpace* paged_space(int idx) { |
672 switch (idx) { | 908 switch (idx) { |
673 case OLD_SPACE: | 909 case OLD_SPACE: |
674 return old_space(); | 910 return old_space(); |
675 case MAP_SPACE: | 911 case MAP_SPACE: |
676 return map_space(); | 912 return map_space(); |
677 case CODE_SPACE: | 913 case CODE_SPACE: |
678 return code_space(); | 914 return code_space(); |
679 case NEW_SPACE: | 915 case NEW_SPACE: |
680 case LO_SPACE: | 916 case LO_SPACE: |
681 UNREACHABLE(); | 917 UNREACHABLE(); |
682 } | 918 } |
683 return NULL; | 919 return NULL; |
684 } | 920 } |
921 | |
685 Space* space(int idx) { | 922 Space* space(int idx) { |
686 switch (idx) { | 923 switch (idx) { |
687 case NEW_SPACE: | 924 case NEW_SPACE: |
688 return new_space(); | 925 return new_space(); |
689 case LO_SPACE: | 926 case LO_SPACE: |
690 return lo_space(); | 927 return lo_space(); |
691 default: | 928 default: |
692 return paged_space(idx); | 929 return paged_space(idx); |
693 } | 930 } |
694 } | 931 } |
(...skipping 19 matching lines...) | |
714 Address* OldSpaceAllocationLimitAddress() { | 951 Address* OldSpaceAllocationLimitAddress() { |
715 return old_space_->allocation_limit_address(); | 952 return old_space_->allocation_limit_address(); |
716 } | 953 } |
717 | 954 |
718 // TODO(hpayer): There is still a missmatch between capacity and actual | 955 // TODO(hpayer): There is still a missmatch between capacity and actual |
719 // committed memory size. | 956 // committed memory size. |
720 bool CanExpandOldGeneration(int size) { | 957 bool CanExpandOldGeneration(int size) { |
721 return (CommittedOldGenerationMemory() + size) < MaxOldGenerationSize(); | 958 return (CommittedOldGenerationMemory() + size) < MaxOldGenerationSize(); |
722 } | 959 } |
723 | 960 |
724 // Returns a deep copy of the JavaScript object. | |
725 // Properties and elements are copied too. | |
726 // Optionally takes an AllocationSite to be appended in an AllocationMemento. | |
727 MUST_USE_RESULT AllocationResult | |
728 CopyJSObject(JSObject* source, AllocationSite* site = NULL); | |
729 | |
730 // Calculates the maximum amount of filler that could be required by the | |
731 // given alignment. | |
732 static int GetMaximumFillToAlign(AllocationAlignment alignment); | |
733 // Calculates the actual amount of filler required for a given address at the | |
734 // given alignment. | |
735 static int GetFillToAlign(Address address, AllocationAlignment alignment); | |
736 | |
737 // Creates a filler object and returns a heap object immediately after it. | |
738 MUST_USE_RESULT HeapObject* PrecedeWithFiller(HeapObject* object, | |
739 int filler_size); | |
740 // Creates a filler object if needed for alignment and returns a heap object | |
741 // immediately after it. If any space is left after the returned object, | |
742 // another filler object is created so the over allocated memory is iterable. | |
743 MUST_USE_RESULT HeapObject* AlignWithFiller(HeapObject* object, | |
744 int object_size, | |
745 int allocation_size, | |
746 AllocationAlignment alignment); | |
747 | |
748 // Clear the Instanceof cache (used when a prototype changes). | 961 // Clear the Instanceof cache (used when a prototype changes). |
749 inline void ClearInstanceofCache(); | 962 inline void ClearInstanceofCache(); |
750 | 963 |
751 // Iterates the whole code space to clear all ICs of the given kind. | 964 // Iterates the whole code space to clear all ICs of the given kind. |
752 void ClearAllICsByKind(Code::Kind kind); | 965 void ClearAllICsByKind(Code::Kind kind); |
753 | 966 |
754 // FreeSpace objects have a null map after deserialization. Update the map. | 967 // FreeSpace objects have a null map after deserialization. Update the map. |
755 void RepairFreeListsAfterDeserialization(); | 968 void RepairFreeListsAfterDeserialization(); |
756 | 969 |
757 template <typename T> | |
758 static inline bool IsOneByte(T t, int chars); | |
759 | |
760 // Move len elements within a given array from src_index index to dst_index | 970 // Move len elements within a given array from src_index index to dst_index |
761 // index. | 971 // index. |
762 void MoveElements(FixedArray* array, int dst_index, int src_index, int len); | 972 void MoveElements(FixedArray* array, int dst_index, int src_index, int len); |
763 | 973 |
764 // Sloppy mode arguments object size. | |
765 static const int kSloppyArgumentsObjectSize = | |
766 JSObject::kHeaderSize + 2 * kPointerSize; | |
767 // Strict mode arguments has no callee so it is smaller. | |
768 static const int kStrictArgumentsObjectSize = | |
769 JSObject::kHeaderSize + 1 * kPointerSize; | |
770 // Indicies for direct access into argument objects. | |
771 static const int kArgumentsLengthIndex = 0; | |
772 // callee is only valid in sloppy mode. | |
773 static const int kArgumentsCalleeIndex = 1; | |
774 | |
775 // Finalizes an external string by deleting the associated external | 974 // Finalizes an external string by deleting the associated external |
776 // data and clearing the resource pointer. | 975 // data and clearing the resource pointer. |
777 inline void FinalizeExternalString(String* string); | 976 inline void FinalizeExternalString(String* string); |
778 | 977 |
779 // Initialize a filler object to keep the ability to iterate over the heap | 978 // Initialize a filler object to keep the ability to iterate over the heap |
780 // when introducing gaps within pages. | 979 // when introducing gaps within pages. |
781 void CreateFillerObjectAt(Address addr, int size); | 980 void CreateFillerObjectAt(Address addr, int size); |
782 | 981 |
783 bool CanMoveObjectStart(HeapObject* object); | 982 bool CanMoveObjectStart(HeapObject* object); |
784 | 983 |
785 // Indicates whether live bytes adjustment is triggered | |
786 // - from within the GC code before sweeping started (SEQUENTIAL_TO_SWEEPER), | |
787 // - or from within GC (CONCURRENT_TO_SWEEPER), | |
788 // - or mutator code (CONCURRENT_TO_SWEEPER). | |
789 enum InvocationMode { SEQUENTIAL_TO_SWEEPER, CONCURRENT_TO_SWEEPER }; | |
790 | |
791 // Maintain consistency of live bytes during incremental marking. | 984 // Maintain consistency of live bytes during incremental marking. |
792 void AdjustLiveBytes(HeapObject* object, int by, InvocationMode mode); | 985 void AdjustLiveBytes(HeapObject* object, int by, InvocationMode mode); |
793 | 986 |
794 // Trim the given array from the left. Note that this relocates the object | 987 // Trim the given array from the left. Note that this relocates the object |
795 // start and hence is only valid if there is only a single reference to it. | 988 // start and hence is only valid if there is only a single reference to it. |
796 FixedArrayBase* LeftTrimFixedArray(FixedArrayBase* obj, int elements_to_trim); | 989 FixedArrayBase* LeftTrimFixedArray(FixedArrayBase* obj, int elements_to_trim); |
797 | 990 |
798 // Trim the given array from the right. | 991 // Trim the given array from the right. |
799 template<Heap::InvocationMode mode> | 992 template<Heap::InvocationMode mode> |
800 void RightTrimFixedArray(FixedArrayBase* obj, int elements_to_trim); | 993 void RightTrimFixedArray(FixedArrayBase* obj, int elements_to_trim); |
801 | 994 |
802 // Converts the given boolean condition to JavaScript boolean value. | 995 // Converts the given boolean condition to JavaScript boolean value. |
803 inline Object* ToBoolean(bool condition); | 996 inline Object* ToBoolean(bool condition); |
804 | 997 |
805 // Performs garbage collection operation. | 998 // Performs garbage collection operation. |
806 // Returns whether there is a chance that another major GC could | 999 // Returns whether there is a chance that another major GC could |
807 // collect more garbage. | 1000 // collect more garbage. |
808 inline bool CollectGarbage( | 1001 inline bool CollectGarbage( |
809 AllocationSpace space, const char* gc_reason = NULL, | 1002 AllocationSpace space, const char* gc_reason = NULL, |
810 const GCCallbackFlags gc_callback_flags = kNoGCCallbackFlags); | 1003 const GCCallbackFlags gc_callback_flags = kNoGCCallbackFlags); |
811 | 1004 |
812 static const int kNoGCFlags = 0; | |
813 static const int kReduceMemoryFootprintMask = 1; | |
814 static const int kAbortIncrementalMarkingMask = 2; | |
815 static const int kFinalizeIncrementalMarkingMask = 4; | |
816 | |
817 // Making the heap iterable requires us to abort incremental marking. | |
818 static const int kMakeHeapIterableMask = kAbortIncrementalMarkingMask; | |
819 | |
820 // Invoked when GC was requested via the stack guard. | 1005 // Invoked when GC was requested via the stack guard. |
821 void HandleGCRequest(); | 1006 void HandleGCRequest(); |
822 | 1007 |
823 // Attempt to over-approximate the weak closure by marking object groups and | 1008 // Attempt to over-approximate the weak closure by marking object groups and |
824 // implicit references from global handles, but don't atomically complete | 1009 // implicit references from global handles, but don't atomically complete |
825 // marking. If we continue to mark incrementally, we might have marked | 1010 // marking. If we continue to mark incrementally, we might have marked |
826 // objects that die later. | 1011 // objects that die later. |
827 void OverApproximateWeakClosure(const char* gc_reason); | 1012 void OverApproximateWeakClosure(const char* gc_reason); |
828 | 1013 |
829 // Performs a full garbage collection. If (flags & kMakeHeapIterableMask) is | 1014 // Performs a full garbage collection. If (flags & kMakeHeapIterableMask) is |
(...skipping 144 matching lines...) | |
974 // Checks whether an address/object in the heap (including auxiliary | 1159 // Checks whether an address/object in the heap (including auxiliary |
975 // area and unused area). | 1160 // area and unused area). |
976 bool Contains(Address addr); | 1161 bool Contains(Address addr); |
977 bool Contains(HeapObject* value); | 1162 bool Contains(HeapObject* value); |
978 | 1163 |
979 // Checks whether an address/object in a space. | 1164 // Checks whether an address/object in a space. |
980 // Currently used by tests, serialization and heap verification only. | 1165 // Currently used by tests, serialization and heap verification only. |
981 bool InSpace(Address addr, AllocationSpace space); | 1166 bool InSpace(Address addr, AllocationSpace space); |
982 bool InSpace(HeapObject* value, AllocationSpace space); | 1167 bool InSpace(HeapObject* value, AllocationSpace space); |
983 | 1168 |
984 // Checks whether the space is valid. | |
985 static bool IsValidAllocationSpace(AllocationSpace space); | |
986 | |
987 // Checks whether the given object is allowed to be migrated from it's | 1169 // Checks whether the given object is allowed to be migrated from it's |
988 // current space into the given destination space. Used for debugging. | 1170 // current space into the given destination space. Used for debugging. |
989 inline bool AllowedToBeMigrated(HeapObject* object, AllocationSpace dest); | 1171 inline bool AllowedToBeMigrated(HeapObject* object, AllocationSpace dest); |
990 | 1172 |
991 // Sets the stub_cache_ (only used when expanding the dictionary). | 1173 // Sets the stub_cache_ (only used when expanding the dictionary). |
992 void public_set_code_stubs(UnseededNumberDictionary* value) { | 1174 void public_set_code_stubs(UnseededNumberDictionary* value) { |
993 roots_[kCodeStubsRootIndex] = value; | 1175 roots_[kCodeStubsRootIndex] = value; |
994 } | 1176 } |
995 | 1177 |
996 // Sets the non_monomorphic_cache_ (only used when expanding the dictionary). | 1178 // Sets the non_monomorphic_cache_ (only used when expanding the dictionary). |
(...skipping 13 matching lines...) | |
1010 roots_[kMaterializedObjectsRootIndex] = objects; | 1192 roots_[kMaterializedObjectsRootIndex] = objects; |
1011 } | 1193 } |
1012 | 1194 |
1013 // Generated code can embed this address to get access to the roots. | 1195 // Generated code can embed this address to get access to the roots. |
1014 Object** roots_array_start() { return roots_; } | 1196 Object** roots_array_start() { return roots_; } |
1015 | 1197 |
1016 Address* store_buffer_top_address() { | 1198 Address* store_buffer_top_address() { |
1017 return reinterpret_cast<Address*>(&roots_[kStoreBufferTopRootIndex]); | 1199 return reinterpret_cast<Address*>(&roots_[kStoreBufferTopRootIndex]); |
1018 } | 1200 } |
1019 | 1201 |
1020 static bool RootIsImmortalImmovable(int root_index); | |
1021 void CheckHandleCount(); | 1202 void CheckHandleCount(); |
1022 | 1203 |
1023 #ifdef VERIFY_HEAP | |
1024 // Verify the heap is in its normal state before or after a GC. | |
1025 void Verify(); | |
1026 #endif | |
1027 | |
1028 #ifdef DEBUG | |
1029 void Print(); | |
1030 void PrintHandles(); | |
1031 | |
1032 // Report heap statistics. | |
1033 void ReportHeapStatistics(const char* title); | |
1034 void ReportCodeStatistics(const char* title); | |
1035 #endif | |
1036 | |
1037 // Zapping is needed for verify heap, and always done in debug builds. | |
1038 static inline bool ShouldZapGarbage() { | |
1039 #ifdef DEBUG | |
1040 return true; | |
1041 #else | |
1042 #ifdef VERIFY_HEAP | |
1043 return FLAG_verify_heap; | |
1044 #else | |
1045 return false; | |
1046 #endif | |
1047 #endif | |
1048 } | |
1049 | |
1050 // Number of "runtime allocations" done so far. | 1204 // Number of "runtime allocations" done so far. |
1051 uint32_t allocations_count() { return allocations_count_; } | 1205 uint32_t allocations_count() { return allocations_count_; } |
1052 | 1206 |
1053 // Returns deterministic "time" value in ms. Works only with | 1207 // Returns deterministic "time" value in ms. Works only with |
1054 // FLAG_verify_predictable. | 1208 // FLAG_verify_predictable. |
1055 double synthetic_time() { return allocations_count_ / 2.0; } | 1209 double synthetic_time() { return allocations_count_ / 2.0; } |
1056 | 1210 |
1057 // Print short heap statistics. | 1211 // Print short heap statistics. |
1058 void PrintShortHeapStatistics(); | 1212 void PrintShortHeapStatistics(); |
1059 | 1213 |
1060 size_t object_count_last_gc(size_t index) { | 1214 size_t object_count_last_gc(size_t index) { |
1061 return index < OBJECT_STATS_COUNT ? object_counts_last_time_[index] : 0; | 1215 return index < OBJECT_STATS_COUNT ? object_counts_last_time_[index] : 0; |
1062 } | 1216 } |
1217 | |
1063 size_t object_size_last_gc(size_t index) { | 1218 size_t object_size_last_gc(size_t index) { |
1064 return index < OBJECT_STATS_COUNT ? object_sizes_last_time_[index] : 0; | 1219 return index < OBJECT_STATS_COUNT ? object_sizes_last_time_[index] : 0; |
1065 } | 1220 } |
1066 | 1221 |
1067 // Write barrier support for address[offset] = o. | 1222 // Write barrier support for address[offset] = o. |
1068 INLINE(void RecordWrite(Address address, int offset)); | 1223 INLINE(void RecordWrite(Address address, int offset)); |
1069 | 1224 |
1070 // Write barrier support for address[start : start + len[ = o. | 1225 // Write barrier support for address[start : start + len[ = o. |
1071 INLINE(void RecordWrites(Address address, int start, int len)); | 1226 INLINE(void RecordWrites(Address address, int start, int len)); |
1072 | 1227 |
1073 enum HeapState { NOT_IN_GC, SCAVENGE, MARK_COMPACT }; | |
1074 inline HeapState gc_state() { return gc_state_; } | 1228 inline HeapState gc_state() { return gc_state_; } |
1075 | 1229 |
1076 inline bool IsInGCPostProcessing() { return gc_post_processing_depth_ > 0; } | 1230 inline bool IsInGCPostProcessing() { return gc_post_processing_depth_ > 0; } |
1077 | 1231 |
1232 #ifdef VERIFY_HEAP | |
Hannes Payer (out of office), 2015/08/24 10:38:38: Can we move these ifdefs to the end of the methods
Michael Lippautz, 2015/08/24 11:14:27: Done.
1233 // Verify the heap is in its normal state before or after a GC. | |
1234 void Verify(); | |
1235 #endif | |
1236 | |
1078 #ifdef DEBUG | 1237 #ifdef DEBUG |
1079 void set_allocation_timeout(int timeout) { allocation_timeout_ = timeout; } | 1238 void set_allocation_timeout(int timeout) { allocation_timeout_ = timeout; } |
1080 | 1239 |
1081 void TracePathToObjectFrom(Object* target, Object* root); | 1240 void TracePathToObjectFrom(Object* target, Object* root); |
1082 void TracePathToObject(Object* target); | 1241 void TracePathToObject(Object* target); |
1083 void TracePathToGlobal(); | 1242 void TracePathToGlobal(); |
1243 | |
1244 void Print(); | |
1245 void PrintHandles(); | |
1246 | |
1247 // Report heap statistics. | |
1248 void ReportHeapStatistics(const char* title); | |
1249 void ReportCodeStatistics(const char* title); | |
1084 #endif | 1250 #endif |
1085 | 1251 |
1086 // Callback function passed to Heap::Iterate etc. Copies an object if | |
1087 // necessary, the object might be promoted to an old space. The caller must | |
1088 // ensure the precondition that the object is (a) a heap object and (b) in | |
1089 // the heap's from space. | |
1090 static inline void ScavengePointer(HeapObject** p); | |
1091 static inline void ScavengeObject(HeapObject** p, HeapObject* object); | |
1092 | |
1093 // Slow part of scavenge object. | |
1094 static void ScavengeObjectSlow(HeapObject** p, HeapObject* object); | |
1095 | |
1096 enum ScratchpadSlotMode { IGNORE_SCRATCHPAD_SLOT, RECORD_SCRATCHPAD_SLOT }; | |
1097 | |
1098 // If an object has an AllocationMemento trailing it, return it, otherwise | 1252 // If an object has an AllocationMemento trailing it, return it, otherwise |
1099 // return NULL; | 1253 // return NULL; |
1100 inline AllocationMemento* FindAllocationMemento(HeapObject* object); | 1254 inline AllocationMemento* FindAllocationMemento(HeapObject* object); |
1101 | 1255 |
1102 // An object may have an AllocationSite associated with it through a trailing | |
1103 // AllocationMemento. Its feedback should be updated when objects are found | |
1104 // in the heap. | |
1105 static inline void UpdateAllocationSiteFeedback(HeapObject* object, | |
1106 ScratchpadSlotMode mode); | |
1107 | |
1108 // Support for partial snapshots. After calling this we have a linear | |
1109 // space to write objects in each space. | |
1110 struct Chunk { | |
1111 uint32_t size; | |
1112 Address start; | |
1113 Address end; | |
1114 }; | |
1115 | |
1116 typedef List<Chunk> Reservation; | |
1117 | |
1118 // Returns false if not able to reserve. | 1256 // Returns false if not able to reserve. |
1119 bool ReserveSpace(Reservation* reservations); | 1257 bool ReserveSpace(Reservation* reservations); |
1120 | 1258 |
1121 // | 1259 // |
1122 // Support for the API. | 1260 // Support for the API. |
1123 // | 1261 // |
1124 | 1262 |
1125 void CreateApiObjects(); | 1263 void CreateApiObjects(); |
1126 | 1264 |
1127 inline intptr_t PromotedTotalSize() { | 1265 inline intptr_t PromotedTotalSize() { |
1128 int64_t total = PromotedSpaceSizeOfObjects() + PromotedExternalMemorySize(); | 1266 int64_t total = PromotedSpaceSizeOfObjects() + PromotedExternalMemorySize(); |
1129 if (total > std::numeric_limits<intptr_t>::max()) { | 1267 if (total > std::numeric_limits<intptr_t>::max()) { |
1130 // TODO(erikcorry): Use uintptr_t everywhere we do heap size calculations. | 1268 // TODO(erikcorry): Use uintptr_t everywhere we do heap size calculations. |
1131 return std::numeric_limits<intptr_t>::max(); | 1269 return std::numeric_limits<intptr_t>::max(); |
1132 } | 1270 } |
1133 if (total < 0) return 0; | 1271 if (total < 0) return 0; |
1134 return static_cast<intptr_t>(total); | 1272 return static_cast<intptr_t>(total); |
1135 } | 1273 } |
1136 | 1274 |
1137 inline intptr_t OldGenerationSpaceAvailable() { | 1275 inline intptr_t OldGenerationSpaceAvailable() { |
1138 return old_generation_allocation_limit_ - PromotedTotalSize(); | 1276 return old_generation_allocation_limit_ - PromotedTotalSize(); |
1139 } | 1277 } |
1140 | 1278 |
1141 inline intptr_t OldGenerationCapacityAvailable() { | 1279 inline intptr_t OldGenerationCapacityAvailable() { |
1142 return max_old_generation_size_ - PromotedTotalSize(); | 1280 return max_old_generation_size_ - PromotedTotalSize(); |
1143 } | 1281 } |
1144 | 1282 |
1145 static const intptr_t kMinimumOldGenerationAllocationLimit = | |
1146 8 * (Page::kPageSize > MB ? Page::kPageSize : MB); | |
1147 | |
1148 static const int kInitalOldGenerationLimitFactor = 2; | |
1149 | |
1150 #if V8_OS_ANDROID | |
1151 // Don't apply pointer multiplier on Android since it has no swap space and | |
1152 // should instead adapt it's heap size based on available physical memory. | |
1153 static const int kPointerMultiplier = 1; | |
1154 #else | |
1155 static const int kPointerMultiplier = i::kPointerSize / 4; | |
1156 #endif | |
1157 | |
1158 // The new space size has to be a power of 2. Sizes are in MB. | |
1159 static const int kMaxSemiSpaceSizeLowMemoryDevice = 1 * kPointerMultiplier; | |
1160 static const int kMaxSemiSpaceSizeMediumMemoryDevice = 4 * kPointerMultiplier; | |
1161 static const int kMaxSemiSpaceSizeHighMemoryDevice = 8 * kPointerMultiplier; | |
1162 static const int kMaxSemiSpaceSizeHugeMemoryDevice = 8 * kPointerMultiplier; | |
1163 | |
1164 // The old space size has to be a multiple of Page::kPageSize. | |
1165 // Sizes are in MB. | |
1166 static const int kMaxOldSpaceSizeLowMemoryDevice = 128 * kPointerMultiplier; | |
1167 static const int kMaxOldSpaceSizeMediumMemoryDevice = | |
1168 256 * kPointerMultiplier; | |
1169 static const int kMaxOldSpaceSizeHighMemoryDevice = 512 * kPointerMultiplier; | |
1170 static const int kMaxOldSpaceSizeHugeMemoryDevice = 700 * kPointerMultiplier; | |
1171 | |
1172 // The executable size has to be a multiple of Page::kPageSize. | |
1173 // Sizes are in MB. | |
1174 static const int kMaxExecutableSizeLowMemoryDevice = 96 * kPointerMultiplier; | |
1175 static const int kMaxExecutableSizeMediumMemoryDevice = | |
1176 192 * kPointerMultiplier; | |
1177 static const int kMaxExecutableSizeHighMemoryDevice = | |
1178 256 * kPointerMultiplier; | |
1179 static const int kMaxExecutableSizeHugeMemoryDevice = | |
1180 256 * kPointerMultiplier; | |
1181 | |
1182 static const int kTraceRingBufferSize = 512; | |
1183 static const int kStacktraceBufferSize = 512; | |
1184 | |
1185 static const double kMinHeapGrowingFactor; | |
1186 static const double kMaxHeapGrowingFactor; | |
1187 static const double kMaxHeapGrowingFactorMemoryConstrained; | |
1188 static const double kMaxHeapGrowingFactorIdle; | |
1189 static const double kTargetMutatorUtilization; | |
1190 | |
1191 static double HeapGrowingFactor(double gc_speed, double mutator_speed); | |
1192 | |
1193 // Calculates the allocation limit based on a given growing factor and a | 1283 // Calculates the allocation limit based on a given growing factor and a |
1194 // given old generation size. | 1284 // given old generation size. |
1195 intptr_t CalculateOldGenerationAllocationLimit(double factor, | 1285 intptr_t CalculateOldGenerationAllocationLimit(double factor, |
1196 intptr_t old_gen_size); | 1286 intptr_t old_gen_size); |
1197 | 1287 |
1198 // Sets the allocation limit to trigger the next full garbage collection. | 1288 // Sets the allocation limit to trigger the next full garbage collection. |
1199 void SetOldGenerationAllocationLimit(intptr_t old_gen_size, double gc_speed, | 1289 void SetOldGenerationAllocationLimit(intptr_t old_gen_size, double gc_speed, |
1200 double mutator_speed); | 1290 double mutator_speed); |
1201 | 1291 |
1202 // Decrease the allocation limit if the new limit based on the given | 1292 // Decrease the allocation limit if the new limit based on the given |
1203 // parameters is lower than the current limit. | 1293 // parameters is lower than the current limit. |
1204 void DampenOldGenerationAllocationLimit(intptr_t old_gen_size, | 1294 void DampenOldGenerationAllocationLimit(intptr_t old_gen_size, |
1205 double gc_speed, | 1295 double gc_speed, |
1206 double mutator_speed); | 1296 double mutator_speed); |
1207 | 1297 |
1208 // Indicates whether inline bump-pointer allocation has been disabled. | 1298 // Indicates whether inline bump-pointer allocation has been disabled. |
1209 bool inline_allocation_disabled() { return inline_allocation_disabled_; } | 1299 bool inline_allocation_disabled() { return inline_allocation_disabled_; } |
1210 | 1300 |
1211 // Switch whether inline bump-pointer allocation should be used. | 1301 // Switch whether inline bump-pointer allocation should be used. |
1212 void EnableInlineAllocation(); | 1302 void EnableInlineAllocation(); |
1213 void DisableInlineAllocation(); | 1303 void DisableInlineAllocation(); |
1214 | 1304 |
1215 // Implements the corresponding V8 API function. | 1305 // Implements the corresponding V8 API function. |
1216 bool IdleNotification(double deadline_in_seconds); | 1306 bool IdleNotification(double deadline_in_seconds); |
1217 bool IdleNotification(int idle_time_in_ms); | 1307 bool IdleNotification(int idle_time_in_ms); |
1218 | 1308 |
1219 double MonotonicallyIncreasingTimeInMs(); | 1309 double MonotonicallyIncreasingTimeInMs(); |
1220 | 1310 |
1221 // Declare all the root indices. This defines the root list order. | |
1222 enum RootListIndex { | |
1223 #define ROOT_INDEX_DECLARATION(type, name, camel_name) k##camel_name##RootIndex, | |
1224 STRONG_ROOT_LIST(ROOT_INDEX_DECLARATION) | |
1225 #undef ROOT_INDEX_DECLARATION | |
1226 | |
1227 #define STRING_INDEX_DECLARATION(name, str) k##name##RootIndex, | |
1228 INTERNALIZED_STRING_LIST(STRING_INDEX_DECLARATION) | |
1229 #undef STRING_DECLARATION | |
1230 | |
1231 #define SYMBOL_INDEX_DECLARATION(name) k##name##RootIndex, | |
1232 PRIVATE_SYMBOL_LIST(SYMBOL_INDEX_DECLARATION) | |
1233 #undef SYMBOL_INDEX_DECLARATION | |
1234 | |
1235 #define SYMBOL_INDEX_DECLARATION(name, varname, description) k##name##RootIndex, | |
1236 PUBLIC_SYMBOL_LIST(SYMBOL_INDEX_DECLARATION) | |
1237 #undef SYMBOL_INDEX_DECLARATION | |
1238 | |
1239 // Utility type maps | |
1240 #define DECLARE_STRUCT_MAP(NAME, Name, name) k##Name##MapRootIndex, | |
1241 STRUCT_LIST(DECLARE_STRUCT_MAP) | |
1242 #undef DECLARE_STRUCT_MAP | |
1243 kStringTableRootIndex, | |
1244 | |
1245 #define ROOT_INDEX_DECLARATION(type, name, camel_name) k##camel_name##RootIndex, | |
1246 SMI_ROOT_LIST(ROOT_INDEX_DECLARATION) | |
1247 #undef ROOT_INDEX_DECLARATION | |
1248 kRootListLength, | |
1249 kStrongRootListLength = kStringTableRootIndex, | |
1250 kSmiRootsStart = kStringTableRootIndex + 1 | |
1251 }; | |
1252 | |
1253 Object* root(RootListIndex index) { return roots_[index]; } | 1311 Object* root(RootListIndex index) { return roots_[index]; } |
1254 | 1312 |
1255 STATIC_ASSERT(kUndefinedValueRootIndex == | |
1256 Internals::kUndefinedValueRootIndex); | |
1257 STATIC_ASSERT(kNullValueRootIndex == Internals::kNullValueRootIndex); | |
1258 STATIC_ASSERT(kTrueValueRootIndex == Internals::kTrueValueRootIndex); | |
1259 STATIC_ASSERT(kFalseValueRootIndex == Internals::kFalseValueRootIndex); | |
1260 STATIC_ASSERT(kempty_stringRootIndex == Internals::kEmptyStringRootIndex); | |
1261 | |
1262 // Generated code can embed direct references to non-writable roots if | |
1263 // they are in new space. | |
1264 static bool RootCanBeWrittenAfterInitialization(RootListIndex root_index); | |
1265 // Generated code can treat direct references to this root as constant. | 1313 // Generated code can treat direct references to this root as constant. |
1266 bool RootCanBeTreatedAsConstant(RootListIndex root_index); | 1314 bool RootCanBeTreatedAsConstant(RootListIndex root_index); |
1267 | 1315 |
1268 Map* MapForFixedTypedArray(ExternalArrayType array_type); | 1316 Map* MapForFixedTypedArray(ExternalArrayType array_type); |
1269 RootListIndex RootIndexForFixedTypedArray(ExternalArrayType array_type); | 1317 RootListIndex RootIndexForFixedTypedArray(ExternalArrayType array_type); |
1270 | 1318 |
1271 RootListIndex RootIndexForEmptyFixedTypedArray(ElementsKind kind); | 1319 RootListIndex RootIndexForEmptyFixedTypedArray(ElementsKind kind); |
1272 FixedTypedArrayBase* EmptyFixedTypedArrayForMap(Map* map); | 1320 FixedTypedArrayBase* EmptyFixedTypedArrayForMap(Map* map); |
1273 | 1321 |
1274 void RecordStats(HeapStats* stats, bool take_snapshot = false); | 1322 void RecordStats(HeapStats* stats, bool take_snapshot = false); |
1275 | 1323 |
1276 // Copy block of memory from src to dst. Size of block should be aligned | |
1277 // by pointer size. | |
1278 static inline void CopyBlock(Address dst, Address src, int byte_size); | |
1279 | |
1280 // Optimized version of memmove for blocks with pointer size aligned sizes and | |
1281 // pointer size aligned addresses. | |
1282 static inline void MoveBlock(Address dst, Address src, int byte_size); | |
1283 | |
1284 // Check new space expansion criteria and expand semispaces if it was hit. | 1324 // Check new space expansion criteria and expand semispaces if it was hit. |
1285 void CheckNewSpaceExpansionCriteria(); | 1325 void CheckNewSpaceExpansionCriteria(); |
1286 | 1326 |
1287 inline void IncrementPromotedObjectsSize(int object_size) { | 1327 inline void IncrementPromotedObjectsSize(int object_size) { |
1288 DCHECK(object_size > 0); | 1328 DCHECK(object_size > 0); |
1289 promoted_objects_size_ += object_size; | 1329 promoted_objects_size_ += object_size; |
1290 } | 1330 } |
1291 | 1331 |
1292 inline void IncrementSemiSpaceCopiedObjectSize(int object_size) { | 1332 inline void IncrementSemiSpaceCopiedObjectSize(int object_size) { |
1293 DCHECK(object_size > 0); | 1333 DCHECK(object_size > 0); |
(...skipping 132 matching lines...) | |
1426 void FreeQueuedChunks(); | 1466 void FreeQueuedChunks(); |
1427 | 1467 |
1428 int gc_count() const { return gc_count_; } | 1468 int gc_count() const { return gc_count_; } |
1429 | 1469 |
1430 bool RecentIdleNotificationHappened(); | 1470 bool RecentIdleNotificationHappened(); |
1431 | 1471 |
1432 // Completely clear the Instanceof cache (to stop it keeping objects alive | 1472 // Completely clear the Instanceof cache (to stop it keeping objects alive |
1433 // around a GC). | 1473 // around a GC). |
1434 inline void CompletelyClearInstanceofCache(); | 1474 inline void CompletelyClearInstanceofCache(); |
1435 | 1475 |
1436 // The roots that have an index less than this are always in old space. | |
1437 static const int kOldSpaceRoots = 0x20; | |
1438 | |
1439 inline uint32_t HashSeed(); | 1476 inline uint32_t HashSeed(); |
1440 | 1477 |
1441 inline Smi* NextScriptId(); | 1478 inline Smi* NextScriptId(); |
1442 | 1479 |
1443 inline void SetArgumentsAdaptorDeoptPCOffset(int pc_offset); | 1480 inline void SetArgumentsAdaptorDeoptPCOffset(int pc_offset); |
1444 inline void SetConstructStubDeoptPCOffset(int pc_offset); | 1481 inline void SetConstructStubDeoptPCOffset(int pc_offset); |
1445 inline void SetGetterStubDeoptPCOffset(int pc_offset); | 1482 inline void SetGetterStubDeoptPCOffset(int pc_offset); |
1446 inline void SetSetterStubDeoptPCOffset(int pc_offset); | 1483 inline void SetSetterStubDeoptPCOffset(int pc_offset); |
1447 | 1484 |
1448 // For post mortem debugging. | 1485 // For post mortem debugging. |
(...skipping 12 matching lines...) | |
1461 } | 1498 } |
1462 | 1499 |
1463 void DeoptMarkedAllocationSites(); | 1500 void DeoptMarkedAllocationSites(); |
1464 | 1501 |
1465 bool MaximumSizeScavenge() { return maximum_size_scavenges_ > 0; } | 1502 bool MaximumSizeScavenge() { return maximum_size_scavenges_ > 0; } |
1466 | 1503 |
1467 bool DeoptMaybeTenuredAllocationSites() { | 1504 bool DeoptMaybeTenuredAllocationSites() { |
1468 return new_space_.IsAtMaximumCapacity() && maximum_size_scavenges_ == 0; | 1505 return new_space_.IsAtMaximumCapacity() && maximum_size_scavenges_ == 0; |
1469 } | 1506 } |
1470 | 1507 |
1471 // ObjectStats are kept in two arrays, counts and sizes. Related stats are | |
1472 // stored in a contiguous linear buffer. Stats groups are stored one after | |
1473 // another. | |
1474 enum { | |
1475 FIRST_CODE_KIND_SUB_TYPE = LAST_TYPE + 1, | |
1476 FIRST_FIXED_ARRAY_SUB_TYPE = | |
1477 FIRST_CODE_KIND_SUB_TYPE + Code::NUMBER_OF_KINDS, | |
1478 FIRST_CODE_AGE_SUB_TYPE = | |
1479 FIRST_FIXED_ARRAY_SUB_TYPE + LAST_FIXED_ARRAY_SUB_TYPE + 1, | |
1480 OBJECT_STATS_COUNT = FIRST_CODE_AGE_SUB_TYPE + Code::kCodeAgeCount + 1 | |
1481 }; | |
1482 | |
1483 void RecordObjectStats(InstanceType type, size_t size) { | 1508 void RecordObjectStats(InstanceType type, size_t size) { |
1484 DCHECK(type <= LAST_TYPE); | 1509 DCHECK(type <= LAST_TYPE); |
1485 object_counts_[type]++; | 1510 object_counts_[type]++; |
1486 object_sizes_[type] += size; | 1511 object_sizes_[type] += size; |
1487 } | 1512 } |
1488 | 1513 |
1489 void RecordCodeSubTypeStats(int code_sub_type, int code_age, size_t size) { | 1514 void RecordCodeSubTypeStats(int code_sub_type, int code_age, size_t size) { |
1490 int code_sub_type_index = FIRST_CODE_KIND_SUB_TYPE + code_sub_type; | 1515 int code_sub_type_index = FIRST_CODE_KIND_SUB_TYPE + code_sub_type; |
1491 int code_age_index = | 1516 int code_age_index = |
1492 FIRST_CODE_AGE_SUB_TYPE + code_age - Code::kFirstCodeAge; | 1517 FIRST_CODE_AGE_SUB_TYPE + code_age - Code::kFirstCodeAge; |
(...skipping 15 matching lines...) | |
1508 | 1533 |
1509 void TraceObjectStats(); | 1534 void TraceObjectStats(); |
1510 void TraceObjectStat(const char* name, int count, int size, double time); | 1535 void TraceObjectStat(const char* name, int count, int size, double time); |
1511 void CheckpointObjectStats(); | 1536 void CheckpointObjectStats(); |
1512 bool GetObjectTypeName(size_t index, const char** object_type, | 1537 bool GetObjectTypeName(size_t index, const char** object_type, |
1513 const char** object_sub_type); | 1538 const char** object_sub_type); |
1514 | 1539 |
1515 void RegisterStrongRoots(Object** start, Object** end); | 1540 void RegisterStrongRoots(Object** start, Object** end); |
1516 void UnregisterStrongRoots(Object** start); | 1541 void UnregisterStrongRoots(Object** start); |
1517 | 1542 |
1518 // Taking this lock prevents the GC from entering a phase that relocates | |
1519 // object references. | |
1520 class RelocationLock { | |
1521 public: | |
1522 explicit RelocationLock(Heap* heap) : heap_(heap) { | |
1523 heap_->relocation_mutex_.Lock(); | |
1524 } | |
1525 | |
1526 ~RelocationLock() { heap_->relocation_mutex_.Unlock(); } | |
1527 | |
1528 private: | |
1529 Heap* heap_; | |
1530 }; | |
1531 | |
1532 // An optional version of the above lock that can be used for some critical | |
1533 // sections on the mutator thread; only safe since the GC currently does not | |
1534 // do concurrent compaction. | |
1535 class OptionalRelocationLock { | |
1536 public: | |
1537 OptionalRelocationLock(Heap* heap, bool concurrent) | |
1538 : heap_(heap), concurrent_(concurrent) { | |
1539 if (concurrent_) heap_->relocation_mutex_.Lock(); | |
1540 } | |
1541 | |
1542 ~OptionalRelocationLock() { | |
1543 if (concurrent_) heap_->relocation_mutex_.Unlock(); | |
1544 } | |
1545 | |
1546 private: | |
1547 Heap* heap_; | |
1548 bool concurrent_; | |
1549 }; | |
1550 | |
1551 void AddWeakObjectToCodeDependency(Handle<HeapObject> obj, | 1543 void AddWeakObjectToCodeDependency(Handle<HeapObject> obj, |
1552 Handle<DependentCode> dep); | 1544 Handle<DependentCode> dep); |
1553 | 1545 |
1554 DependentCode* LookupWeakObjectToCodeDependency(Handle<HeapObject> obj); | 1546 DependentCode* LookupWeakObjectToCodeDependency(Handle<HeapObject> obj); |
1555 | 1547 |
1556 void AddRetainedMap(Handle<Map> map); | 1548 void AddRetainedMap(Handle<Map> map); |
1557 | 1549 |
1558 static void FatalProcessOutOfMemory(const char* location, | |
1559 bool take_snapshot = false); | |
1560 | |
1561 // This event is triggered after successful allocation of a new object made | 1550 // This event is triggered after successful allocation of a new object made |
1562 // by runtime. Allocations of target space for object evacuation do not | 1551 // by runtime. Allocations of target space for object evacuation do not |
1563 // trigger the event. In order to track ALL allocations one must turn off | 1552 // trigger the event. In order to track ALL allocations one must turn off |
1564 // FLAG_inline_new and FLAG_use_allocation_folding. | 1553 // FLAG_inline_new and FLAG_use_allocation_folding. |
1565 inline void OnAllocationEvent(HeapObject* object, int size_in_bytes); | 1554 inline void OnAllocationEvent(HeapObject* object, int size_in_bytes); |
1566 | 1555 |
1567 // This event is triggered after object is moved to a new place. | 1556 // This event is triggered after object is moved to a new place. |
1568 inline void OnMoveEvent(HeapObject* target, HeapObject* source, | 1557 inline void OnMoveEvent(HeapObject* target, HeapObject* source, |
1569 int size_in_bytes); | 1558 int size_in_bytes); |
1570 | 1559 |
(...skipping 21 matching lines...) | |
1592 | 1581 |
1593 // An ArrayBuffer moved from new space to old space. | 1582 // An ArrayBuffer moved from new space to old space. |
1594 void PromoteArrayBuffer(Object* buffer); | 1583 void PromoteArrayBuffer(Object* buffer); |
1595 | 1584 |
1596 bool HasLowAllocationRate(); | 1585 bool HasLowAllocationRate(); |
1597 bool HasHighFragmentation(); | 1586 bool HasHighFragmentation(); |
1598 bool HasHighFragmentation(intptr_t used, intptr_t committed); | 1587 bool HasHighFragmentation(intptr_t used, intptr_t committed); |
1599 | 1588 |
1600 bool ShouldOptimizeForMemoryUsage() { return optimize_for_memory_usage_; } | 1589 bool ShouldOptimizeForMemoryUsage() { return optimize_for_memory_usage_; } |
1601 | 1590 |
1591 // Returns a deep copy of the JavaScript object. | |
1592 // Properties and elements are copied too. | |
1593 // Optionally takes an AllocationSite to be appended in an AllocationMemento. | |
1594 MUST_USE_RESULT AllocationResult CopyJSObject(JSObject* source, | |
1595 AllocationSite* site = NULL); | |
1596 | |
1597 // Creates a filler object and returns a heap object immediately after it. | |
1598 MUST_USE_RESULT HeapObject* PrecedeWithFiller(HeapObject* object, | |
1599 int filler_size); | |
1600 // Creates a filler object if needed for alignment and returns a heap object | |
1601 // immediately after it. If any space is left after the returned object, | |
1602 // another filler object is created so the over allocated memory is iterable. | |
1603 MUST_USE_RESULT HeapObject* AlignWithFiller(HeapObject* object, | |
1604 int object_size, | |
1605 int allocation_size, | |
1606 AllocationAlignment alignment); | |
1607 | |
1602 private: | 1608 private: |
1609 struct StrongRootsList; | |
1610 | |
1611 struct StringTypeTable { | |
1612 InstanceType type; | |
1613 int size; | |
1614 RootListIndex index; | |
1615 }; | |
1616 | |
1617 struct ConstantStringTable { | |
1618 const char* contents; | |
1619 RootListIndex index; | |
1620 }; | |
1621 | |
1622 struct StructTable { | |
1623 InstanceType type; | |
1624 int size; | |
1625 RootListIndex index; | |
1626 }; | |
1627 | |
1628 struct GCCallbackPair { | |
1629 GCCallbackPair(v8::Isolate::GCCallback callback, GCType gc_type, | |
1630 bool pass_isolate) | |
1631 : callback(callback), gc_type(gc_type), pass_isolate(pass_isolate) {} | |
1632 | |
1633 bool operator==(const GCCallbackPair& other) const { | |
1634 return other.callback == callback; | |
1635 } | |
1636 | |
1637 v8::Isolate::GCCallback callback; | |
1638 GCType gc_type; | |
1639 bool pass_isolate; | |
1640 }; | |
1641 | |
1603 static const int kInitialStringTableSize = 2048; | 1642 static const int kInitialStringTableSize = 2048; |
1604 static const int kInitialEvalCacheSize = 64; | 1643 static const int kInitialEvalCacheSize = 64; |
1605 static const int kInitialNumberStringCacheSize = 256; | 1644 static const int kInitialNumberStringCacheSize = 256; |
1606 | 1645 |
1646 static const int kRememberedUnmappedPages = 128; | |
1647 | |
1648 static const StringTypeTable string_type_table[]; | |
1649 static const ConstantStringTable constant_string_table[]; | |
1650 static const StructTable struct_table[]; | |
1651 | |
1652 static const int kYoungSurvivalRateHighThreshold = 90; | |
1653 static const int kYoungSurvivalRateAllowedDeviation = 15; | |
1654 static const int kOldSurvivalRateLowThreshold = 10; | |
1655 | |
1656 static const int kMaxMarkCompactsInIdleRound = 7; | |
1657 static const int kIdleScavengeThreshold = 5; | |
1658 | |
1659 static const int kAllocationSiteScratchpadSize = 256; | |
1660 | |
1607 Heap(); | 1661 Heap(); |
1608 | 1662 |
1663 static String* UpdateNewSpaceReferenceInExternalStringTableEntry( | |
1664 Heap* heap, Object** pointer); | |
1665 | |
1666 static void ScavengeStoreBufferCallback(Heap* heap, MemoryChunk* page, | |
1667 StoreBufferEvent event); | |
1668 | |
1669 // Selects the proper allocation space depending on the given object | |
1670 // size and pretenuring decision. | |
1671 static AllocationSpace SelectSpace(int object_size, PretenureFlag pretenure) { | |
1672 if (object_size > Page::kMaxRegularHeapObjectSize) return LO_SPACE; | |
1673 return (pretenure == TENURED) ? OLD_SPACE : NEW_SPACE; | |
1674 } | |
1675 | |
1609 int current_gc_flags() { return current_gc_flags_; } | 1676 int current_gc_flags() { return current_gc_flags_; } |
1677 | |
1610 void set_current_gc_flags(int flags) { | 1678 void set_current_gc_flags(int flags) { |
1611 current_gc_flags_ = flags; | 1679 current_gc_flags_ = flags; |
1612 DCHECK(!ShouldFinalizeIncrementalMarking() || | 1680 DCHECK(!ShouldFinalizeIncrementalMarking() || |
1613 !ShouldAbortIncrementalMarking()); | 1681 !ShouldAbortIncrementalMarking()); |
1614 } | 1682 } |
1615 | 1683 |
1616 inline bool ShouldReduceMemory() const { | 1684 inline bool ShouldReduceMemory() const { |
1617 return current_gc_flags_ & kReduceMemoryFootprintMask; | 1685 return current_gc_flags_ & kReduceMemoryFootprintMask; |
1618 } | 1686 } |
1619 | 1687 |
1620 inline bool ShouldAbortIncrementalMarking() const { | 1688 inline bool ShouldAbortIncrementalMarking() const { |
1621 return current_gc_flags_ & kAbortIncrementalMarkingMask; | 1689 return current_gc_flags_ & kAbortIncrementalMarkingMask; |
1622 } | 1690 } |
1623 | 1691 |
1624 inline bool ShouldFinalizeIncrementalMarking() const { | 1692 inline bool ShouldFinalizeIncrementalMarking() const { |
1625 return current_gc_flags_ & kFinalizeIncrementalMarkingMask; | 1693 return current_gc_flags_ & kFinalizeIncrementalMarkingMask; |
1626 } | 1694 } |
1627 | 1695 |
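The kNoGCFlags/k*Mask constants near the top of the class are plain bitmasks consumed by the predicates above; a short sketch of how they combine, written as it would look inside Heap (the particular flag combination is illustrative):

    set_current_gc_flags(kReduceMemoryFootprintMask |
                         kFinalizeIncrementalMarkingMask);
    DCHECK(ShouldReduceMemory());                // kReduceMemoryFootprintMask (1) is set
    DCHECK(ShouldFinalizeIncrementalMarking());  // kFinalizeIncrementalMarkingMask (4) is set
    set_current_gc_flags(kNoGCFlags);            // cleared again once the GC cycle is done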
1628 // Allocates a JS Map in the heap. | |
1629 MUST_USE_RESULT AllocationResult | |
1630 AllocateMap(InstanceType instance_type, int instance_size, | |
1631 ElementsKind elements_kind = TERMINAL_FAST_ELEMENTS_KIND); | |
1632 | |
1633 // Allocates and initializes a new JavaScript object based on a | |
1634 // constructor. | |
1635 // If allocation_site is non-null, then a memento is emitted after the object | |
1636 // that points to the site. | |
1637 MUST_USE_RESULT AllocationResult | |
1638 AllocateJSObject(JSFunction* constructor, | |
1639 PretenureFlag pretenure = NOT_TENURED, | |
1640 AllocationSite* allocation_site = NULL); | |
1641 | |
1642 // Allocates and initializes a new JavaScript object based on a map. | |
1643 // Passing an allocation site means that a memento will be created that | |
1644 // points to the site. | |
1645 MUST_USE_RESULT AllocationResult | |
1646 AllocateJSObjectFromMap(Map* map, PretenureFlag pretenure = NOT_TENURED, | |
1647 AllocationSite* allocation_site = NULL); | |
1648 | |
1649 // Allocates a HeapNumber from value. | |
1650 MUST_USE_RESULT AllocationResult | |
1651 AllocateHeapNumber(double value, MutableMode mode = IMMUTABLE, | |
1652 PretenureFlag pretenure = NOT_TENURED); | |
1653 | |
1654 // Allocates SIMD values from the given lane values. | |
1655 #define SIMD_ALLOCATE_DECLARATION(TYPE, Type, type, lane_count, lane_type) \ | |
1656 AllocationResult Allocate##Type(lane_type lanes[lane_count], \ | |
1657 PretenureFlag pretenure = NOT_TENURED); | |
1658 SIMD128_TYPES(SIMD_ALLOCATE_DECLARATION) | |
1659 #undef SIMD_ALLOCATE_DECLARATION | |
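SIMD_ALLOCATE_DECLARATION is applied once per entry of SIMD128_TYPES, so an entry of the shape (FLOAT32X4, Float32x4, float32x4, 4, float) (an assumed example; the actual list lives elsewhere in V8) would expand to a declaration like AllocationResult AllocateFloat32x4(float lanes[4], PretenureFlag pretenure = NOT_TENURED). A runnable sketch of the same X-macro expansion pattern, with a stand-in list and a simplified return type:

    #include <cstdio>

    // Stand-in list in the same (TYPE, Type, type, lane_count, lane_type) shape
    // as SIMD128_TYPES; entries here are illustrative only.
    #define DEMO_SIMD_TYPES(V)                     \
      V(FLOAT32X4, Float32x4, float32x4, 4, float) \
      V(INT32X4, Int32x4, int32x4, 4, int)

    // One definition per list entry, mirroring SIMD_ALLOCATE_DECLARATION.
    #define DEMO_ALLOCATE(TYPE, Type, type, lane_count, lane_type) \
      void Allocate##Type(lane_type lanes[lane_count]) {           \
        printf(#Type " lane 0: %g\n", (double)lanes[0]);           \
      }
    DEMO_SIMD_TYPES(DEMO_ALLOCATE)
    #undef DEMO_ALLOCATE

    int main() {
      float f[4] = {1.5f, 2.5f, 3.5f, 4.5f};
      int i[4] = {1, 2, 3, 4};
      AllocateFloat32x4(f);  // generated by the macro expansion
      AllocateInt32x4(i);
    }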
1660 | |
1661 // Allocates a byte array of the specified length | |
1662 MUST_USE_RESULT AllocationResult | |
1663 AllocateByteArray(int length, PretenureFlag pretenure = NOT_TENURED); | |
1664 | |
1665 // Allocates a bytecode array with given contents. | |
1666 MUST_USE_RESULT AllocationResult | |
1667 AllocateBytecodeArray(int length, const byte* raw_bytecodes, | |
1668 int frame_size); | |
1669 | |
1670 // Copy the code and scope info part of the code object, but insert | |
1671 // the provided data as the relocation information. | |
1672 MUST_USE_RESULT AllocationResult | |
1673 CopyCode(Code* code, Vector<byte> reloc_info); | |
1674 | |
1675 MUST_USE_RESULT AllocationResult CopyCode(Code* code); | |
1676 | |
1677 // Allocates a fixed array initialized with undefined values | |
1678 MUST_USE_RESULT AllocationResult | |
1679 AllocateFixedArray(int length, PretenureFlag pretenure = NOT_TENURED); | |
1680 | |
1681 // The amount of external memory registered through the API kept alive | |
1682 // by global handles | |
1683 int64_t amount_of_external_allocated_memory_; | |
1684 | |
1685 // Caches the amount of external memory registered at the last global gc. | |
1686 int64_t amount_of_external_allocated_memory_at_last_global_gc_; | |
1687 | |
1688 // This can be calculated directly from a pointer to the heap; however, it is | |
1689 // more expedient to get at the isolate directly from within Heap methods. | |
1690 Isolate* isolate_; | |
1691 | |
1692 Object* roots_[kRootListLength]; | |
1693 | |
1694 size_t code_range_size_; | |
1695 int reserved_semispace_size_; | |
1696 int max_semi_space_size_; | |
1697 int initial_semispace_size_; | |
1698 int target_semispace_size_; | |
1699 intptr_t max_old_generation_size_; | |
1700 intptr_t initial_old_generation_size_; | |
1701 bool old_generation_size_configured_; | |
1702 intptr_t max_executable_size_; | |
1703 intptr_t maximum_committed_; | |
1704 | |
1705 // For keeping track of how much data has survived | |
1706 // scavenge since last new space expansion. | |
1707 int survived_since_last_expansion_; | |
1708 | |
1709 // ... and since the last scavenge. | |
1710 int survived_last_scavenge_; | |
1711 | |
1712 int always_allocate_scope_depth_; | |
1713 | |
1714 // For keeping track of context disposals. | |
1715 int contexts_disposed_; | |
1716 | |
1717 int global_ic_age_; | |
1718 | |
1719 int scan_on_scavenge_pages_; | |
1720 | |
1721 NewSpace new_space_; | |
1722 OldSpace* old_space_; | |
1723 OldSpace* code_space_; | |
1724 MapSpace* map_space_; | |
1725 LargeObjectSpace* lo_space_; | |
1726 HeapState gc_state_; | |
1727 int gc_post_processing_depth_; | |
1728 Address new_space_top_after_last_gc_; | |
1729 | |
1730 // Returns the amount of external memory registered since last global gc. | |
1731 int64_t PromotedExternalMemorySize(); | |
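Together with the two counters declared above it, PromotedExternalMemorySize presumably reports how much API-registered external memory has been added since the last global GC, i.e. the difference between the current amount and the amount cached at that GC. A toy illustration of that bookkeeping (names simplified; clamping at zero is an assumption):

    #include <cstdint>
    #include <cstdio>

    int64_t amount_of_external_allocated_memory = 0;
    int64_t amount_at_last_global_gc = 0;

    int64_t PromotedExternalMemorySizeSketch() {
      // Only growth since the last full GC matters; shrinkage reports as zero.
      if (amount_of_external_allocated_memory <= amount_at_last_global_gc) return 0;
      return amount_of_external_allocated_memory - amount_at_last_global_gc;
    }

    int main() {
      amount_at_last_global_gc = 1 << 20;             // 1 MB known at the last full GC
      amount_of_external_allocated_memory = 3 << 20;  // 3 MB registered now
      printf("%lld\n", (long long)PromotedExternalMemorySizeSketch());  // 2097152
    }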
1732 | |
1733 // How many "runtime allocations" happened. | |
1734 uint32_t allocations_count_; | |
1735 | |
1736 // Running hash over allocations performed. | |
1737 uint32_t raw_allocations_hash_; | |
1738 | |
1739 // Countdown counter, dumps allocation hash when 0. | |
1740 uint32_t dump_allocations_hash_countdown_; | |
1741 | |
1742 // How many mark-sweep collections happened. | |
1743 unsigned int ms_count_; | |
1744 | |
1745 // How many GCs happened. | |
1746 unsigned int gc_count_; | |
1747 | |
1748 // For post mortem debugging. | |
1749 static const int kRememberedUnmappedPages = 128; | |
1750 int remembered_unmapped_pages_index_; | |
1751 Address remembered_unmapped_pages_[kRememberedUnmappedPages]; | |
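kRememberedUnmappedPages, the index, and the address array above form a small wrap-around log of recently unmapped pages that survives into a crash dump. A standalone sketch of that recording pattern (the wrap-around detail is an assumption about the helper in heap.cc):

    #include <cstdio>

    typedef unsigned long Address;  // stand-in for v8::internal::Address
    const int kRememberedUnmappedPages = 128;
    int remembered_unmapped_pages_index_ = 0;
    Address remembered_unmapped_pages_[kRememberedUnmappedPages];

    void RememberUnmappedPageSketch(Address page) {
      remembered_unmapped_pages_[remembered_unmapped_pages_index_] = page;
      // Wrap around so only the most recent kRememberedUnmappedPages are kept.
      remembered_unmapped_pages_index_ =
          (remembered_unmapped_pages_index_ + 1) % kRememberedUnmappedPages;
    }

    int main() {
      for (Address p = 0x10000; p < 0x10000 + 4 * 0x1000; p += 0x1000)
        RememberUnmappedPageSketch(p);
      printf("next slot: %d\n", remembered_unmapped_pages_index_);  // 4
    }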
1752 | |
1753 #define ROOT_ACCESSOR(type, name, camel_name) \ | 1696 #define ROOT_ACCESSOR(type, name, camel_name) \ |
1754 inline void set_##name(type* value); | 1697 inline void set_##name(type* value); |
1755 ROOT_LIST(ROOT_ACCESSOR) | 1698 ROOT_LIST(ROOT_ACCESSOR) |
1756 #undef ROOT_ACCESSOR | 1699 #undef ROOT_ACCESSOR |
1757 | 1700 |
1758 #ifdef DEBUG | |
1759 // If the --gc-interval flag is set to a positive value, this | |
1760 // variable holds the value indicating the number of allocations | |
1761 // remaining until the next failure and garbage collection. | |
1762 int allocation_timeout_; | |
1763 #endif // DEBUG | |
1764 | |
1765 // Limit that triggers a global GC on the next (normally caused) GC. This | |
1766 // is checked when we have already decided to do a GC to help determine | |
1767 // which collector to invoke, before expanding a paged space in the old | |
1768 // generation and on every allocation in large object space. | |
1769 intptr_t old_generation_allocation_limit_; | |
1770 | |
1771 // Indicates that an allocation has failed in the old generation since the | |
1772 // last GC. | |
1773 bool old_gen_exhausted_; | |
1774 | |
1775 // Indicates that memory usage is more important than latency. | |
1776 // TODO(ulan): Merge it with memory reducer once chromium:490559 is fixed. | |
1777 bool optimize_for_memory_usage_; | |
1778 | |
1779 // Indicates that inline bump-pointer allocation has been globally disabled | |
1780 // for all spaces. This is used to disable allocations in generated code. | |
1781 bool inline_allocation_disabled_; | |
1782 | |
1783 // Weak list heads, threaded through the objects. | |
1784 // List heads are initialized lazily and contain the undefined_value at start. | |
1785 Object* native_contexts_list_; | |
1786 Object* allocation_sites_list_; | |
1787 | |
1788 // List of encountered weak collections (JSWeakMap and JSWeakSet) during | |
1789 // marking. It is initialized during marking, destroyed after marking and | |
1790 // contains Smi(0) while marking is not active. | |
1791 Object* encountered_weak_collections_; | |
1792 | |
1793 Object* encountered_weak_cells_; | |
1794 | |
1795 StoreBufferRebuilder store_buffer_rebuilder_; | |
1796 | |
1797 struct StringTypeTable { | |
1798 InstanceType type; | |
1799 int size; | |
1800 RootListIndex index; | |
1801 }; | |
1802 | |
1803 struct ConstantStringTable { | |
1804 const char* contents; | |
1805 RootListIndex index; | |
1806 }; | |
1807 | |
1808 struct StructTable { | |
1809 InstanceType type; | |
1810 int size; | |
1811 RootListIndex index; | |
1812 }; | |
1813 | |
1814 static const StringTypeTable string_type_table[]; | |
1815 static const ConstantStringTable constant_string_table[]; | |
1816 static const StructTable struct_table[]; | |
1817 | |
1818 struct GCCallbackPair { | |
1819 GCCallbackPair(v8::Isolate::GCCallback callback, GCType gc_type, | |
1820 bool pass_isolate) | |
1821 : callback(callback), gc_type(gc_type), pass_isolate(pass_isolate) {} | |
1822 | |
1823 bool operator==(const GCCallbackPair& other) const { | |
1824 return other.callback == callback; | |
1825 } | |
1826 | |
1827 v8::Isolate::GCCallback callback; | |
1828 GCType gc_type; | |
1829 bool pass_isolate; | |
1830 }; | |
1831 | |
1832 List<GCCallbackPair> gc_epilogue_callbacks_; | |
1833 List<GCCallbackPair> gc_prologue_callbacks_; | |
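GCCallbackPair compares equal on the callback pointer alone, which is what allows a prologue or epilogue callback to be removed later given only the function pointer it was registered with. A minimal sketch of that lookup-by-callback pattern, with std::vector standing in for V8's List and the callback type simplified:

    #include <algorithm>
    #include <cassert>
    #include <vector>

    typedef void (*GCCallback)();

    struct CallbackPairSketch {
      GCCallback callback;
      int gc_type;
      bool pass_isolate;
      bool operator==(const CallbackPairSketch& other) const {
        return other.callback == callback;  // identity is the callback alone
      }
    };

    void MyPrologueCallback() {}

    int main() {
      std::vector<CallbackPairSketch> prologue_callbacks;
      prologue_callbacks.push_back({MyPrologueCallback, /*gc_type=*/1, /*pass_isolate=*/true});

      // Removal needs only the function pointer; gc_type and pass_isolate are
      // ignored by operator==, mirroring the struct above.
      CallbackPairSketch key = {MyPrologueCallback, 0, false};
      prologue_callbacks.erase(
          std::remove(prologue_callbacks.begin(), prologue_callbacks.end(), key),
          prologue_callbacks.end());
      assert(prologue_callbacks.empty());
    }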
1834 | |
1835 // Code that should be run before and after each GC. Includes some | 1701 // Code that should be run before and after each GC. Includes some |
1836 // reporting/verification activities when compiled with DEBUG set. | 1702 // reporting/verification activities when compiled with DEBUG set. |
1837 void GarbageCollectionPrologue(); | 1703 void GarbageCollectionPrologue(); |
1838 void GarbageCollectionEpilogue(); | 1704 void GarbageCollectionEpilogue(); |
1839 | 1705 |
1840 void PreprocessStackTraces(); | 1706 void PreprocessStackTraces(); |
1841 | 1707 |
1842 // Pretenuring decisions are made based on feedback collected during new | 1708 // Pretenuring decisions are made based on feedback collected during new |
1843 // space evacuation. Note that between feedback collection and calling this | 1709 // space evacuation. Note that between feedback collection and calling this |
1844 // method, objects in old space must not move. | 1710 // method, objects in old space must not move. |
(...skipping 23 matching lines...) | |
1868 | 1734 |
1869 // Performs garbage collection | 1735 // Performs garbage collection |
1870 // Returns whether there is a chance another major GC could | 1736 // Returns whether there is a chance another major GC could |
1871 // collect more garbage. | 1737 // collect more garbage. |
1872 bool PerformGarbageCollection( | 1738 bool PerformGarbageCollection( |
1873 GarbageCollector collector, | 1739 GarbageCollector collector, |
1874 const GCCallbackFlags gc_callback_flags = kNoGCCallbackFlags); | 1740 const GCCallbackFlags gc_callback_flags = kNoGCCallbackFlags); |
1875 | 1741 |
1876 inline void UpdateOldSpaceLimits(); | 1742 inline void UpdateOldSpaceLimits(); |
1877 | 1743 |
1878 // Selects the proper allocation space depending on the given object | 1744 // Initializes a JSObject based on its map. |
1879 // size and pretenuring decision. | 1745 void InitializeJSObjectFromMap(JSObject* obj, FixedArray* properties, |
1880 static AllocationSpace SelectSpace(int object_size, | 1746 Map* map); |
1881 PretenureFlag pretenure) { | 1747 void InitializeAllocationMemento(AllocationMemento* memento, |
1882 if (object_size > Page::kMaxRegularHeapObjectSize) return LO_SPACE; | 1748 AllocationSite* allocation_site); |
1883 return (pretenure == TENURED) ? OLD_SPACE : NEW_SPACE; | 1749 |
1884 } | 1750 bool CreateInitialMaps(); |
1751 void CreateInitialObjects(); | |
1752 | |
1753 // These Create*EntryStub functions are here and forced to not be inlined | |
1754 // because of a gcc-4.4 bug that assigns wrong vtable entries. | |
1755 NO_INLINE(void CreateJSEntryStub()); | |
1756 NO_INLINE(void CreateJSConstructEntryStub()); | |
1757 | |
1758 void CreateFixedStubs(); | |
1885 | 1759 |
1886 HeapObject* DoubleAlignForDeserialization(HeapObject* object, int size); | 1760 HeapObject* DoubleAlignForDeserialization(HeapObject* object, int size); |
1887 | 1761 |
1762 // Performs a minor collection in new generation. | |
1763 void Scavenge(); | |
1764 | |
1765 // Commits from space if it is uncommitted. | |
1766 void EnsureFromSpaceIsCommitted(); | |
1767 | |
1768 // Uncommit unused semi space. | |
1769 bool UncommitFromSpace() { return new_space_.UncommitFromSpace(); } | |
1770 | |
1771 // Fill in bogus values in from space | |
1772 void ZapFromSpace(); | |
1773 | |
1774 Address DoScavenge(ObjectVisitor* scavenge_visitor, Address new_space_front); | |
1775 | |
1776 // Performs a major collection in the whole heap. | |
1777 void MarkCompact(); | |
1778 | |
1779 // Code to be run before and after mark-compact. | |
1780 void MarkCompactPrologue(); | |
1781 void MarkCompactEpilogue(); | |
1782 | |
1783 void ProcessNativeContexts(WeakObjectRetainer* retainer); | |
1784 void ProcessAllocationSites(WeakObjectRetainer* retainer); | |
1785 | |
1786 // Deopts all code that contains allocation instructions which are tenured or | |
1787 // not tenured. Moreover, it clears the pretenuring allocation site statistics. | |
1788 void ResetAllAllocationSitesDependentCode(PretenureFlag flag); | |
1789 | |
1790 // Evaluates local pretenuring for the old space and calls | |
1791 // ResetAllTenuredAllocationSitesDependentCode if too many objects died in | |
1792 // the old space. | |
1793 void EvaluateOldSpaceLocalPretenuring(uint64_t size_of_objects_before_gc); | |
1794 | |
1795 // Called on heap tear-down. Frees all remaining ArrayBuffer backing stores. | |
1796 void TearDownArrayBuffers(); | |
1797 | |
1798 // These correspond to the non-Helper versions. | |
1799 void RegisterNewArrayBufferHelper(std::map<void*, size_t>& live_buffers, | |
1800 void* data, size_t length); | |
1801 void UnregisterArrayBufferHelper( | |
1802 std::map<void*, size_t>& live_buffers, | |
1803 std::map<void*, size_t>& not_yet_discovered_buffers, void* data); | |
1804 void RegisterLiveArrayBufferHelper( | |
1805 std::map<void*, size_t>& not_yet_discovered_buffers, void* data); | |
1806 size_t FreeDeadArrayBuffersHelper( | |
1807 Isolate* isolate, std::map<void*, size_t>& live_buffers, | |
1808 std::map<void*, size_t>& not_yet_discovered_buffers); | |
1809 void TearDownArrayBuffersHelper( | |
1810 Isolate* isolate, std::map<void*, size_t>& live_buffers, | |
1811 std::map<void*, size_t>& not_yet_discovered_buffers); | |
1812 | |
1813 // Record statistics before and after garbage collection. | |
1814 void ReportStatisticsBeforeGC(); | |
1815 void ReportStatisticsAfterGC(); | |
1816 | |
1817 // Creates and installs the full-sized number string cache. | |
1818 int FullSizeNumberStringCacheLength(); | |
1819 // Flush the number to string cache. | |
1820 void FlushNumberStringCache(); | |
1821 | |
1822 // Sets used allocation sites entries to undefined. | |
1823 void FlushAllocationSitesScratchpad(); | |
1824 | |
1825 // Initializes the allocation sites scratchpad with undefined values. | |
1826 void InitializeAllocationSitesScratchpad(); | |
1827 | |
1828 // Adds an allocation site to the scratchpad if there is space left. | |
1829 void AddAllocationSiteToScratchpad(AllocationSite* site, | |
1830 ScratchpadSlotMode mode); | |
1831 | |
1832 void UpdateSurvivalStatistics(int start_new_space_size); | |
1833 | |
1834 // TODO(hpayer): Allocation site pretenuring may make this method obsolete. | |
1835 // Re-visit incremental marking heuristics. | |
1836 bool IsHighSurvivalRate() { return high_survival_rate_period_length_ > 0; } | |
1837 | |
1838 void ConfigureInitialOldGenerationSize(); | |
1839 | |
1840 void SelectScavengingVisitorsTable(); | |
1841 | |
1842 bool HasLowYoungGenerationAllocationRate(); | |
1843 bool HasLowOldGenerationAllocationRate(); | |
1844 double YoungGenerationMutatorUtilization(); | |
1845 double OldGenerationMutatorUtilization(); | |
1846 | |
1847 void ReduceNewSpaceSize(); | |
1848 | |
1849 bool TryFinalizeIdleIncrementalMarking( | |
1850 double idle_time_in_ms, size_t size_of_objects, | |
1851 size_t mark_compact_speed_in_bytes_per_ms); | |
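TryFinalizeIdleIncrementalMarking receives the idle budget, the current heap size, and a measured mark-compact speed, which suggests a check of the form "would a finalizing mark-compact fit into the remaining idle time". The calculation below is illustrative only; the real heuristic lives in GCIdleTimeHandler and has more inputs:

    #include <cstddef>
    #include <cstdio>

    bool FitsInIdleTimeSketch(double idle_time_in_ms, size_t size_of_objects,
                              size_t speed_in_bytes_per_ms) {
      if (speed_in_bytes_per_ms == 0) return false;  // no speed estimate yet
      double estimated_ms =
          static_cast<double>(size_of_objects) / speed_in_bytes_per_ms;
      return estimated_ms <= idle_time_in_ms;
    }

    int main() {
      // 32 MB of live objects at 10000 bytes/ms take roughly 3.4 seconds to
      // process: a 16 ms idle slice is not enough, a 5 second one is.
      printf("%d\n", FitsInIdleTimeSketch(16.0, 32u << 20, 10000));    // 0
      printf("%d\n", FitsInIdleTimeSketch(5000.0, 32u << 20, 10000));  // 1
    }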
1852 | |
1853 GCIdleTimeHandler::HeapState ComputeHeapState(); | |
1854 | |
1855 bool PerformIdleTimeAction(GCIdleTimeAction action, | |
1856 GCIdleTimeHandler::HeapState heap_state, | |
1857 double deadline_in_ms); | |
1858 | |
1859 void IdleNotificationEpilogue(GCIdleTimeAction action, | |
1860 GCIdleTimeHandler::HeapState heap_state, | |
1861 double start_ms, double deadline_in_ms); | |
1862 void CheckAndNotifyBackgroundIdleNotification(double idle_time_in_ms, | |
1863 double now_ms); | |
1864 | |
1865 void ClearObjectStats(bool clear_last_time_stats = false); | |
1866 | |
1867 inline void UpdateAllocationsHash(HeapObject* object); | |
1868 inline void UpdateAllocationsHash(uint32_t value); | |
1869 inline void PrintAlloctionsHash(); | |
1870 | |
1871 void AddToRingBuffer(const char* string); | |
1872 void GetFromRingBuffer(char* buffer); | |
1873 | |
1874 // Allocates a JS Map in the heap. | |
1875 MUST_USE_RESULT AllocationResult | |
1876 AllocateMap(InstanceType instance_type, int instance_size, | |
1877 ElementsKind elements_kind = TERMINAL_FAST_ELEMENTS_KIND); | |
1878 | |
1879 // Allocates and initializes a new JavaScript object based on a | |
1880 // constructor. | |
1881 // If allocation_site is non-null, then a memento is emitted after the object | |
1882 // that points to the site. | |
1883 MUST_USE_RESULT AllocationResult AllocateJSObject( | |
1884 JSFunction* constructor, PretenureFlag pretenure = NOT_TENURED, | |
1885 AllocationSite* allocation_site = NULL); | |
1886 | |
1887 // Allocates and initializes a new JavaScript object based on a map. | |
1888 // Passing an allocation site means that a memento will be created that | |
1889 // points to the site. | |
1890 MUST_USE_RESULT AllocationResult | |
1891 AllocateJSObjectFromMap(Map* map, PretenureFlag pretenure = NOT_TENURED, | |
1892 AllocationSite* allocation_site = NULL); | |
1893 | |
1894 // Allocates a HeapNumber from value. | |
1895 MUST_USE_RESULT AllocationResult | |
1896 AllocateHeapNumber(double value, MutableMode mode = IMMUTABLE, | |
1897 PretenureFlag pretenure = NOT_TENURED); | |
1898 | |
1899 // Allocates SIMD values from the given lane values. | |
1900 #define SIMD_ALLOCATE_DECLARATION(TYPE, Type, type, lane_count, lane_type) \ | |
1901 AllocationResult Allocate##Type(lane_type lanes[lane_count], \ | |
1902 PretenureFlag pretenure = NOT_TENURED); | |
1903 SIMD128_TYPES(SIMD_ALLOCATE_DECLARATION) | |
1904 #undef SIMD_ALLOCATE_DECLARATION | |
1905 | |
1906 // Allocates a byte array of the specified length | |
1907 MUST_USE_RESULT AllocationResult | |
1908 AllocateByteArray(int length, PretenureFlag pretenure = NOT_TENURED); | |
1909 | |
1910 // Allocates a bytecode array with given contents. | |
1911 MUST_USE_RESULT AllocationResult | |
1912 AllocateBytecodeArray(int length, const byte* raw_bytecodes, int frame_size); | |
1913 | |
1914 // Copy the code and scope info part of the code object, but insert | |
1915 // the provided data as the relocation information. | |
1916 MUST_USE_RESULT AllocationResult CopyCode(Code* code, | |
1917 Vector<byte> reloc_info); | |
1918 | |
1919 MUST_USE_RESULT AllocationResult CopyCode(Code* code); | |
1920 | |
1921 // Allocates a fixed array initialized with undefined values | |
1922 MUST_USE_RESULT AllocationResult | |
1923 AllocateFixedArray(int length, PretenureFlag pretenure = NOT_TENURED); | |
1924 | |
1888 // Allocate an uninitialized object. The memory is non-executable if the | 1925 // Allocate an uninitialized object. The memory is non-executable if the |
1889 // hardware and OS allow. This is the single choke-point for allocations | 1926 // hardware and OS allow. This is the single choke-point for allocations |
1890 // performed by the runtime and should not be bypassed (to extend this to | 1927 // performed by the runtime and should not be bypassed (to extend this to |
1891 // inlined allocations, use the Heap::DisableInlineAllocation() support). | 1928 // inlined allocations, use the Heap::DisableInlineAllocation() support). |
1892 MUST_USE_RESULT inline AllocationResult AllocateRaw( | 1929 MUST_USE_RESULT inline AllocationResult AllocateRaw( |
1893 int size_in_bytes, AllocationSpace space, AllocationSpace retry_space, | 1930 int size_in_bytes, AllocationSpace space, AllocationSpace retry_space, |
1894 AllocationAlignment aligment = kWordAligned); | 1931 AllocationAlignment aligment = kWordAligned); |
1895 | 1932 |
1896 // Allocates a heap object based on the map. | 1933 // Allocates a heap object based on the map. |
1897 MUST_USE_RESULT AllocationResult | 1934 MUST_USE_RESULT AllocationResult |
1898 Allocate(Map* map, AllocationSpace space, | 1935 Allocate(Map* map, AllocationSpace space, |
1899 AllocationSite* allocation_site = NULL); | 1936 AllocationSite* allocation_site = NULL); |
1900 | 1937 |
1901 // Allocates a partial map for bootstrapping. | 1938 // Allocates a partial map for bootstrapping. |
1902 MUST_USE_RESULT AllocationResult | 1939 MUST_USE_RESULT AllocationResult |
1903 AllocatePartialMap(InstanceType instance_type, int instance_size); | 1940 AllocatePartialMap(InstanceType instance_type, int instance_size); |
1904 | 1941 |
1905 // Initializes a JSObject based on its map. | |
1906 void InitializeJSObjectFromMap(JSObject* obj, FixedArray* properties, | |
1907 Map* map); | |
1908 void InitializeAllocationMemento(AllocationMemento* memento, | |
1909 AllocationSite* allocation_site); | |
1910 | |
1911 // Allocate a block of memory in the given space (filled with a filler). | 1942 // Allocate a block of memory in the given space (filled with a filler). |
1912 // Used as a fall-back for generated code when the space is full. | 1943 // Used as a fall-back for generated code when the space is full. |
1913 MUST_USE_RESULT AllocationResult | 1944 MUST_USE_RESULT AllocationResult |
1914 AllocateFillerObject(int size, bool double_align, AllocationSpace space); | 1945 AllocateFillerObject(int size, bool double_align, AllocationSpace space); |
1915 | 1946 |
1916 // Allocate an uninitialized fixed array. | 1947 // Allocate an uninitialized fixed array. |
1917 MUST_USE_RESULT AllocationResult | 1948 MUST_USE_RESULT AllocationResult |
1918 AllocateRawFixedArray(int length, PretenureFlag pretenure); | 1949 AllocateRawFixedArray(int length, PretenureFlag pretenure); |
1919 | 1950 |
1920 // Allocate an uninitialized fixed double array. | 1951 // Allocate an uninitialized fixed double array. |
1921 MUST_USE_RESULT AllocationResult | 1952 MUST_USE_RESULT AllocationResult |
1922 AllocateRawFixedDoubleArray(int length, PretenureFlag pretenure); | 1953 AllocateRawFixedDoubleArray(int length, PretenureFlag pretenure); |
1923 | 1954 |
1924 // Allocate an initialized fixed array with the given filler value. | 1955 // Allocate an initialized fixed array with the given filler value. |
1925 MUST_USE_RESULT AllocationResult | 1956 MUST_USE_RESULT AllocationResult |
1926 AllocateFixedArrayWithFiller(int length, PretenureFlag pretenure, | 1957 AllocateFixedArrayWithFiller(int length, PretenureFlag pretenure, |
1927 Object* filler); | 1958 Object* filler); |
1928 | 1959 |
1929 // Allocates and partially initializes a String. There are two String | 1960 // Allocates and partially initializes a String. There are two String |
1930 // encodings: one-byte and two-byte. These functions allocate a string of | 1961 // encodings: one-byte and two-byte. These functions allocate a string of |
1931 // the given length and set its map and length fields. The characters of | 1962 // the given length and set its map and length fields. The characters of |
1932 // the string are uninitialized. | 1963 // the string are uninitialized. |
1933 MUST_USE_RESULT AllocationResult | 1964 MUST_USE_RESULT AllocationResult |
1934 AllocateRawOneByteString(int length, PretenureFlag pretenure); | 1965 AllocateRawOneByteString(int length, PretenureFlag pretenure); |
1935 MUST_USE_RESULT AllocationResult | 1966 MUST_USE_RESULT AllocationResult |
1936 AllocateRawTwoByteString(int length, PretenureFlag pretenure); | 1967 AllocateRawTwoByteString(int length, PretenureFlag pretenure); |
1937 | 1968 |
1938 bool CreateInitialMaps(); | |
1939 void CreateInitialObjects(); | |
1940 | |
1941 // Allocates an internalized string in old space based on the character | 1969 // Allocates an internalized string in old space based on the character |
1942 // stream. | 1970 // stream. |
1943 MUST_USE_RESULT inline AllocationResult AllocateInternalizedStringFromUtf8( | 1971 MUST_USE_RESULT inline AllocationResult AllocateInternalizedStringFromUtf8( |
1944 Vector<const char> str, int chars, uint32_t hash_field); | 1972 Vector<const char> str, int chars, uint32_t hash_field); |
1945 | 1973 |
1946 MUST_USE_RESULT inline AllocationResult AllocateOneByteInternalizedString( | 1974 MUST_USE_RESULT inline AllocationResult AllocateOneByteInternalizedString( |
1947 Vector<const uint8_t> str, uint32_t hash_field); | 1975 Vector<const uint8_t> str, uint32_t hash_field); |
1948 | 1976 |
1949 MUST_USE_RESULT inline AllocationResult AllocateTwoByteInternalizedString( | 1977 MUST_USE_RESULT inline AllocationResult AllocateTwoByteInternalizedString( |
1950 Vector<const uc16> str, uint32_t hash_field); | 1978 Vector<const uc16> str, uint32_t hash_field); |
(...skipping 46 matching lines...) | |
1997 MUST_USE_RESULT AllocationResult CopyAndTenureFixedCOWArray(FixedArray* src); | 2025 MUST_USE_RESULT AllocationResult CopyAndTenureFixedCOWArray(FixedArray* src); |
1998 | 2026 |
1999 // Make a copy of src, set the map, and return the copy. | 2027 // Make a copy of src, set the map, and return the copy. |
2000 MUST_USE_RESULT AllocationResult | 2028 MUST_USE_RESULT AllocationResult |
2001 CopyFixedDoubleArrayWithMap(FixedDoubleArray* src, Map* map); | 2029 CopyFixedDoubleArrayWithMap(FixedDoubleArray* src, Map* map); |
2002 | 2030 |
2003 // Allocates a fixed double array with uninitialized values. | 2031 // Allocates a fixed double array with uninitialized values. |
2004 MUST_USE_RESULT AllocationResult AllocateUninitializedFixedDoubleArray( | 2032 MUST_USE_RESULT AllocationResult AllocateUninitializedFixedDoubleArray( |
2005 int length, PretenureFlag pretenure = NOT_TENURED); | 2033 int length, PretenureFlag pretenure = NOT_TENURED); |
2006 | 2034 |
2007 // These Create*EntryStub functions are here and forced to not be inlined | |
2008 // because of a gcc-4.4 bug that assigns wrong vtable entries. | |
2009 NO_INLINE(void CreateJSEntryStub()); | |
2010 NO_INLINE(void CreateJSConstructEntryStub()); | |
2011 | |
2012 void CreateFixedStubs(); | |
2013 | |
2014 // Allocate empty fixed array. | 2035 // Allocate empty fixed array. |
2015 MUST_USE_RESULT AllocationResult AllocateEmptyFixedArray(); | 2036 MUST_USE_RESULT AllocationResult AllocateEmptyFixedArray(); |
2016 | 2037 |
2017 // Allocate empty fixed typed array of given type. | 2038 // Allocate empty fixed typed array of given type. |
2018 MUST_USE_RESULT AllocationResult | 2039 MUST_USE_RESULT AllocationResult |
2019 AllocateEmptyFixedTypedArray(ExternalArrayType array_type); | 2040 AllocateEmptyFixedTypedArray(ExternalArrayType array_type); |
2020 | 2041 |
2021 // Allocate a tenured simple cell. | 2042 // Allocate a tenured simple cell. |
2022 MUST_USE_RESULT AllocationResult AllocateCell(Object* value); | 2043 MUST_USE_RESULT AllocationResult AllocateCell(Object* value); |
2023 | 2044 |
2024 // Allocate a tenured JS global property cell initialized with the hole. | 2045 // Allocate a tenured JS global property cell initialized with the hole. |
2025 MUST_USE_RESULT AllocationResult AllocatePropertyCell(); | 2046 MUST_USE_RESULT AllocationResult AllocatePropertyCell(); |
2026 | 2047 |
2027 MUST_USE_RESULT AllocationResult AllocateWeakCell(HeapObject* value); | 2048 MUST_USE_RESULT AllocationResult AllocateWeakCell(HeapObject* value); |
2028 | 2049 |
2029 // Allocates a new utility object in the old generation. | 2050 // Allocates a new utility object in the old generation. |
2030 MUST_USE_RESULT AllocationResult AllocateStruct(InstanceType type); | 2051 MUST_USE_RESULT AllocationResult AllocateStruct(InstanceType type); |
2031 | 2052 |
2032 // Allocates a new foreign object. | 2053 // Allocates a new foreign object. |
2033 MUST_USE_RESULT AllocationResult | 2054 MUST_USE_RESULT AllocationResult |
2034 AllocateForeign(Address address, PretenureFlag pretenure = NOT_TENURED); | 2055 AllocateForeign(Address address, PretenureFlag pretenure = NOT_TENURED); |
2035 | 2056 |
2036 MUST_USE_RESULT AllocationResult | 2057 MUST_USE_RESULT AllocationResult |
2037 AllocateCode(int object_size, bool immovable); | 2058 AllocateCode(int object_size, bool immovable); |
2038 | 2059 |
2039 MUST_USE_RESULT AllocationResult InternalizeStringWithKey(HashTableKey* key); | 2060 MUST_USE_RESULT AllocationResult InternalizeStringWithKey(HashTableKey* key); |
2040 | 2061 |
2041 MUST_USE_RESULT AllocationResult InternalizeString(String* str); | 2062 MUST_USE_RESULT AllocationResult InternalizeString(String* str); |
2042 | 2063 |
2043 // Performs a minor collection in new generation. | 2064 // The amount of external memory registered through the API kept alive |
2044 void Scavenge(); | 2065 // by global handles |
2066 int64_t amount_of_external_allocated_memory_; | |
2045 | 2067 |
2046 // Commits from space if it is uncommitted. | 2068 // Caches the amount of external memory registered at the last global gc. |
2047 void EnsureFromSpaceIsCommitted(); | 2069 int64_t amount_of_external_allocated_memory_at_last_global_gc_; |
2048 | 2070 |
2049 // Uncommit unused semi space. | 2071 // This can be calculated directly from a pointer to the heap; however, it is |
2050 bool UncommitFromSpace() { return new_space_.UncommitFromSpace(); } | 2072 // more expedient to get at the isolate directly from within Heap methods. |
2073 Isolate* isolate_; | |
2051 | 2074 |
2052 // Fill in bogus values in from space | 2075 Object* roots_[kRootListLength]; |
2053 void ZapFromSpace(); | |
2054 | 2076 |
2055 static String* UpdateNewSpaceReferenceInExternalStringTableEntry( | 2077 size_t code_range_size_; |
2056 Heap* heap, Object** pointer); | 2078 int reserved_semispace_size_; |
2079 int max_semi_space_size_; | |
2080 int initial_semispace_size_; | |
2081 int target_semispace_size_; | |
2082 intptr_t max_old_generation_size_; | |
2083 intptr_t initial_old_generation_size_; | |
2084 bool old_generation_size_configured_; | |
2085 intptr_t max_executable_size_; | |
2086 intptr_t maximum_committed_; | |
2057 | 2087 |
2058 Address DoScavenge(ObjectVisitor* scavenge_visitor, Address new_space_front); | 2088 // For keeping track of how much data has survived |
2059 static void ScavengeStoreBufferCallback(Heap* heap, MemoryChunk* page, | 2089 // scavenge since last new space expansion. |
2060 StoreBufferEvent event); | 2090 int survived_since_last_expansion_; |
2061 | 2091 |
2062 // Performs a major collection in the whole heap. | 2092 // ... and since the last scavenge. |
2063 void MarkCompact(); | 2093 int survived_last_scavenge_; |
2064 | 2094 |
2065 // Code to be run before and after mark-compact. | 2095 int always_allocate_scope_depth_; |
2066 void MarkCompactPrologue(); | |
2067 void MarkCompactEpilogue(); | |
2068 | 2096 |
2069 void ProcessNativeContexts(WeakObjectRetainer* retainer); | 2097 // For keeping track of context disposals. |
2070 void ProcessAllocationSites(WeakObjectRetainer* retainer); | 2098 int contexts_disposed_; |
2071 | 2099 |
2072 // Deopts all code that contains allocation instructions which are tenured or | 2100 int global_ic_age_; |
2073 // not tenured. Moreover, it clears the pretenuring allocation site statistics. | |
2074 void ResetAllAllocationSitesDependentCode(PretenureFlag flag); | |
2075 | 2101 |
2076 // Evaluates local pretenuring for the old space and calls | 2102 int scan_on_scavenge_pages_; |
2077 // ResetAllTenuredAllocationSitesDependentCode if too many objects died in | |
2078 // the old space. | |
2079 void EvaluateOldSpaceLocalPretenuring(uint64_t size_of_objects_before_gc); | |
2080 | 2103 |
2081 // Called on heap tear-down. Frees all remaining ArrayBuffer backing stores. | 2104 NewSpace new_space_; |
2082 void TearDownArrayBuffers(); | 2105 OldSpace* old_space_; |
2106 OldSpace* code_space_; | |
2107 MapSpace* map_space_; | |
2108 LargeObjectSpace* lo_space_; | |
2109 HeapState gc_state_; | |
2110 int gc_post_processing_depth_; | |
2111 Address new_space_top_after_last_gc_; | |
2083 | 2112 |
2084 // These correspond to the non-Helper versions. | 2113 // Returns the amount of external memory registered since last global gc. |
2085 void RegisterNewArrayBufferHelper(std::map<void*, size_t>& live_buffers, | 2114 int64_t PromotedExternalMemorySize(); |
2086 void* data, size_t length); | |
2087 void UnregisterArrayBufferHelper( | |
2088 std::map<void*, size_t>& live_buffers, | |
2089 std::map<void*, size_t>& not_yet_discovered_buffers, void* data); | |
2090 void RegisterLiveArrayBufferHelper( | |
2091 std::map<void*, size_t>& not_yet_discovered_buffers, void* data); | |
2092 size_t FreeDeadArrayBuffersHelper( | |
2093 Isolate* isolate, std::map<void*, size_t>& live_buffers, | |
2094 std::map<void*, size_t>& not_yet_discovered_buffers); | |
2095 void TearDownArrayBuffersHelper( | |
2096 Isolate* isolate, std::map<void*, size_t>& live_buffers, | |
2097 std::map<void*, size_t>& not_yet_discovered_buffers); | |
2098 | 2115 |
2099 // Record statistics before and after garbage collection. | 2116 // How many "runtime allocations" happened. |
2100 void ReportStatisticsBeforeGC(); | 2117 uint32_t allocations_count_; |
2101 void ReportStatisticsAfterGC(); | 2118 |
2119 // Running hash over allocations performed. | |
2120 uint32_t raw_allocations_hash_; | |
2121 | |
2122 // Countdown counter, dumps allocation hash when 0. | |
2123 uint32_t dump_allocations_hash_countdown_; | |
2124 | |
2125 // How many mark-sweep collections happened. | |
2126 unsigned int ms_count_; | |
2127 | |
2128 // How many GCs happened. | |
2129 unsigned int gc_count_; | |
2130 | |
2131 // For post mortem debugging. | |
2132 int remembered_unmapped_pages_index_; | |
2133 Address remembered_unmapped_pages_[kRememberedUnmappedPages]; | |
2134 | |
2135 #ifdef DEBUG | |
2136 // If the --gc-interval flag is set to a positive value, this | |
2137 // variable holds the value indicating the number of allocations | |
2138 // remaining until the next failure and garbage collection. | |
2139 int allocation_timeout_; | |
2140 #endif // DEBUG | |
2141 | |
2142 // Limit that triggers a global GC on the next (normally caused) GC. This | |
2143 // is checked when we have already decided to do a GC to help determine | |
2144 // which collector to invoke, before expanding a paged space in the old | |
2145 // generation and on every allocation in large object space. | |
2146 intptr_t old_generation_allocation_limit_; | |
2147 | |
2148 // Indicates that an allocation has failed in the old generation since the | |
2149 // last GC. | |
2150 bool old_gen_exhausted_; | |
2151 | |
2152 // Indicates that memory usage is more important than latency. | |
2153 // TODO(ulan): Merge it with memory reducer once chromium:490559 is fixed. | |
2154 bool optimize_for_memory_usage_; | |
2155 | |
2156 // Indicates that inline bump-pointer allocation has been globally disabled | |
2157 // for all spaces. This is used to disable allocations in generated code. | |
2158 bool inline_allocation_disabled_; | |
2159 | |
2160 // Weak list heads, threaded through the objects. | |
2161 // List heads are initialized lazily and contain the undefined_value at start. | |
2162 Object* native_contexts_list_; | |
2163 Object* allocation_sites_list_; | |
2164 | |
2165 // List of encountered weak collections (JSWeakMap and JSWeakSet) during | |
2166 // marking. It is initialized during marking, destroyed after marking and | |
2167 // contains Smi(0) while marking is not active. | |
2168 Object* encountered_weak_collections_; | |
2169 | |
2170 Object* encountered_weak_cells_; | |
2171 | |
2172 StoreBufferRebuilder store_buffer_rebuilder_; | |
2173 | |
2174 List<GCCallbackPair> gc_epilogue_callbacks_; | |
2175 List<GCCallbackPair> gc_prologue_callbacks_; | |
2102 | 2176 |
2103 // Total RegExp code ever generated | 2177 // Total RegExp code ever generated |
2104 double total_regexp_code_generated_; | 2178 double total_regexp_code_generated_; |
2105 | 2179 |
2106 int deferred_counters_[v8::Isolate::kUseCounterFeatureCount]; | 2180 int deferred_counters_[v8::Isolate::kUseCounterFeatureCount]; |
2107 | 2181 |
2108 GCTracer* tracer_; | 2182 GCTracer* tracer_; |
2109 | 2183 |
2110 // Creates and installs the full-sized number string cache. | |
2111 int FullSizeNumberStringCacheLength(); | |
2112 // Flush the number to string cache. | |
2113 void FlushNumberStringCache(); | |
2114 | |
2115 // Sets used allocation sites entries to undefined. | |
2116 void FlushAllocationSitesScratchpad(); | |
2117 | |
2118 // Initializes the allocation sites scratchpad with undefined values. | |
2119 void InitializeAllocationSitesScratchpad(); | |
2120 | |
2121 // Adds an allocation site to the scratchpad if there is space left. | |
2122 void AddAllocationSiteToScratchpad(AllocationSite* site, | |
2123 ScratchpadSlotMode mode); | |
2124 | |
2125 void UpdateSurvivalStatistics(int start_new_space_size); | |
2126 | |
2127 static const int kYoungSurvivalRateHighThreshold = 90; | |
2128 static const int kYoungSurvivalRateAllowedDeviation = 15; | |
2129 | |
2130 static const int kOldSurvivalRateLowThreshold = 10; | |
2131 | |
2132 int high_survival_rate_period_length_; | 2184 int high_survival_rate_period_length_; |
2133 intptr_t promoted_objects_size_; | 2185 intptr_t promoted_objects_size_; |
2134 double promotion_ratio_; | 2186 double promotion_ratio_; |
2135 double promotion_rate_; | 2187 double promotion_rate_; |
2136 intptr_t semi_space_copied_object_size_; | 2188 intptr_t semi_space_copied_object_size_; |
2137 intptr_t previous_semi_space_copied_object_size_; | 2189 intptr_t previous_semi_space_copied_object_size_; |
2138 double semi_space_copied_rate_; | 2190 double semi_space_copied_rate_; |
2139 int nodes_died_in_new_space_; | 2191 int nodes_died_in_new_space_; |
2140 int nodes_copied_in_new_space_; | 2192 int nodes_copied_in_new_space_; |
2141 int nodes_promoted_; | 2193 int nodes_promoted_; |
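These counters feed the survival-rate heuristics: UpdateSurvivalStatistics (declared earlier) combines the promoted and semi-space-copied byte counts with the new-space size at scavenge start, and kYoungSurvivalRateHighThreshold above classifies the result. The formulas below are an illustration of how such a combination could work, not necessarily the exact ones in heap.cc:

    #include <cstdio>

    const int kYoungSurvivalRateHighThreshold = 90;  // as declared above

    int main() {
      double start_new_space_size = 1024.0 * 1024.0;   // bytes at scavenge start
      double promoted_objects_size = 600.0 * 1024.0;   // promoted to old space
      double semi_space_copied_size = 350.0 * 1024.0;  // copied within new space

      double promotion_ratio = promoted_objects_size / start_new_space_size * 100.0;
      double copied_rate = semi_space_copied_size / start_new_space_size * 100.0;
      double survival_rate = promotion_ratio + copied_rate;

      printf("promotion %.1f%%, copied %.1f%%, survival %.1f%% -> high: %s\n",
             promotion_ratio, copied_rate, survival_rate,
             survival_rate > kYoungSurvivalRateHighThreshold ? "yes" : "no");
    }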
2142 | 2194 |
2143 // This is the pretenuring trigger for allocation sites that are in maybe | 2195 // This is the pretenuring trigger for allocation sites that are in maybe |
2144 // tenure state. When we switch to the maximum new space size, we deoptimize | 2196 // tenure state. When we switch to the maximum new space size, we deoptimize |
2145 // the code that belongs to the allocation site and derive the lifetime | 2197 // the code that belongs to the allocation site and derive the lifetime |
2146 // of the allocation site. | 2198 // of the allocation site. |
2147 unsigned int maximum_size_scavenges_; | 2199 unsigned int maximum_size_scavenges_; |
2148 | 2200 |
2149 // TODO(hpayer): Allocation site pretenuring may make this method obsolete. | |
2150 // Re-visit incremental marking heuristics. | |
2151 bool IsHighSurvivalRate() { return high_survival_rate_period_length_ > 0; } | |
2152 | |
2153 void ConfigureInitialOldGenerationSize(); | |
2154 | |
2155 void SelectScavengingVisitorsTable(); | |
2156 | |
2157 bool HasLowYoungGenerationAllocationRate(); | |
2158 bool HasLowOldGenerationAllocationRate(); | |
2159 double YoungGenerationMutatorUtilization(); | |
2160 double OldGenerationMutatorUtilization(); | |
2161 | |
2162 void ReduceNewSpaceSize(); | |
2163 | |
2164 bool TryFinalizeIdleIncrementalMarking( | |
2165 double idle_time_in_ms, size_t size_of_objects, | |
2166 size_t mark_compact_speed_in_bytes_per_ms); | |
2167 | |
2168 GCIdleTimeHandler::HeapState ComputeHeapState(); | |
2169 | |
2170 bool PerformIdleTimeAction(GCIdleTimeAction action, | |
2171 GCIdleTimeHandler::HeapState heap_state, | |
2172 double deadline_in_ms); | |
2173 | |
2174 void IdleNotificationEpilogue(GCIdleTimeAction action, | |
2175 GCIdleTimeHandler::HeapState heap_state, | |
2176 double start_ms, double deadline_in_ms); | |
2177 void CheckAndNotifyBackgroundIdleNotification(double idle_time_in_ms, | |
2178 double now_ms); | |
2179 | |
2180 void ClearObjectStats(bool clear_last_time_stats = false); | |
2181 | |
2182 inline void UpdateAllocationsHash(HeapObject* object); | |
2183 inline void UpdateAllocationsHash(uint32_t value); | |
2184 inline void PrintAlloctionsHash(); | |
2185 | |
2186 void AddToRingBuffer(const char* string); | |
2187 void GetFromRingBuffer(char* buffer); | |
2188 | |
2189 // Object counts and used memory by InstanceType | 2201 // Object counts and used memory by InstanceType |
2190 size_t object_counts_[OBJECT_STATS_COUNT]; | 2202 size_t object_counts_[OBJECT_STATS_COUNT]; |
2191 size_t object_counts_last_time_[OBJECT_STATS_COUNT]; | 2203 size_t object_counts_last_time_[OBJECT_STATS_COUNT]; |
2192 size_t object_sizes_[OBJECT_STATS_COUNT]; | 2204 size_t object_sizes_[OBJECT_STATS_COUNT]; |
2193 size_t object_sizes_last_time_[OBJECT_STATS_COUNT]; | 2205 size_t object_sizes_last_time_[OBJECT_STATS_COUNT]; |
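The four arrays above keep per-slot object counts and byte sizes in parallel, with the *_last_time_ copies acting as the snapshot from the previous reporting cycle so deltas can be computed. A standalone sketch of that bookkeeping (the slot count is a stand-in for OBJECT_STATS_COUNT):

    #include <cstddef>
    #include <cstdio>
    #include <cstring>

    const int kStatsSlots = 8;  // stand-in for OBJECT_STATS_COUNT
    size_t object_counts[kStatsSlots];
    size_t object_sizes[kStatsSlots];
    size_t object_counts_last_time[kStatsSlots];
    size_t object_sizes_last_time[kStatsSlots];

    void RecordObject(int slot, size_t size) {
      object_counts[slot] += 1;    // how many objects landed in this slot
      object_sizes[slot] += size;  // and how many bytes they occupy
    }

    void CheckpointStats() {
      // After reporting, the current values become the "last time" snapshot.
      memcpy(object_counts_last_time, object_counts, sizeof(object_counts));
      memcpy(object_sizes_last_time, object_sizes, sizeof(object_sizes));
    }

    int main() {
      RecordObject(3, 48);
      RecordObject(3, 16);
      printf("slot 3: %zu objects, %zu bytes (delta %zu)\n", object_counts[3],
             object_sizes[3], object_sizes[3] - object_sizes_last_time[3]);
      CheckpointStats();
    }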
2194 | 2206 |
2195 // Maximum GC pause. | 2207 // Maximum GC pause. |
2196 double max_gc_pause_; | 2208 double max_gc_pause_; |
2197 | 2209 |
2198 // Total time spent in GC. | 2210 // Total time spent in GC. |
(...skipping 42 matching lines...) | |
2241 size_t old_generation_allocation_counter_; | 2253 size_t old_generation_allocation_counter_; |
2242 | 2254 |
2243 // The size of objects in old generation after the last MarkCompact GC. | 2255 // The size of objects in old generation after the last MarkCompact GC. |
2244 size_t old_generation_size_at_last_gc_; | 2256 size_t old_generation_size_at_last_gc_; |
2245 | 2257 |
2246 // If the --deopt_every_n_garbage_collections flag is set to a positive value, | 2258 // If the --deopt_every_n_garbage_collections flag is set to a positive value, |
2247 // this variable holds the number of garbage collections since the last | 2259 // this variable holds the number of garbage collections since the last |
2248 // deoptimization triggered by garbage collection. | 2260 // deoptimization triggered by garbage collection. |
2249 int gcs_since_last_deopt_; | 2261 int gcs_since_last_deopt_; |
2250 | 2262 |
2251 static const int kAllocationSiteScratchpadSize = 256; | |
2252 int allocation_sites_scratchpad_length_; | 2263 int allocation_sites_scratchpad_length_; |
2253 | 2264 |
2254 char trace_ring_buffer_[kTraceRingBufferSize]; | 2265 char trace_ring_buffer_[kTraceRingBufferSize]; |
2255 // If it's not full then the data is from 0 to ring_buffer_end_. If it's | 2266 // If it's not full then the data is from 0 to ring_buffer_end_. If it's |
2256 // full then the data is from ring_buffer_end_ to the end of the buffer and | 2267 // full then the data is from ring_buffer_end_ to the end of the buffer and |
2257 // from 0 to ring_buffer_end_. | 2268 // from 0 to ring_buffer_end_. |
2258 bool ring_buffer_full_; | 2269 bool ring_buffer_full_; |
2259 size_t ring_buffer_end_; | 2270 size_t ring_buffer_end_; |
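The comment above fixes the read order of the trace ring buffer: once it has wrapped (ring_buffer_full_), the oldest bytes start at ring_buffer_end_ and the newest end just before it; until then the valid data is simply [0, ring_buffer_end_). A standalone sketch of that exact layout (buffer size and function names are illustrative, not the real AddToRingBuffer/GetFromRingBuffer):

    #include <cstdio>
    #include <cstring>

    const size_t kSketchRingBufferSize = 16;
    char trace_ring_buffer[kSketchRingBufferSize];
    bool ring_buffer_full = false;
    size_t ring_buffer_end = 0;

    void AddToRingBufferSketch(const char* string) {
      for (const char* p = string; *p != '\0'; ++p) {
        trace_ring_buffer[ring_buffer_end++] = *p;
        if (ring_buffer_end == kSketchRingBufferSize) {
          ring_buffer_end = 0;      // wrap to the start ...
          ring_buffer_full = true;  // ... and remember that we did
        }
      }
    }

    void GetFromRingBufferSketch(char* buffer) {
      size_t copied = 0;
      if (ring_buffer_full) {
        // Oldest data first: from ring_buffer_end to the end of the buffer ...
        copied = kSketchRingBufferSize - ring_buffer_end;
        memcpy(buffer, trace_ring_buffer + ring_buffer_end, copied);
      }
      // ... then from 0 to ring_buffer_end.
      memcpy(buffer + copied, trace_ring_buffer, ring_buffer_end);
      buffer[copied + ring_buffer_end] = '\0';
    }

    int main() {
      AddToRingBufferSketch("mark-compact 12ms scavenge 3ms");
      char out[kSketchRingBufferSize + 1];
      GetFromRingBufferSketch(out);
      printf("%s\n", out);  // the most recent 16 characters, oldest first
    }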
2260 | 2271 |
2261 static const int kMaxMarkCompactsInIdleRound = 7; | |
2262 static const int kIdleScavengeThreshold = 5; | |
2263 | |
2264 // Shared state read by the scavenge collector and set by ScavengeObject. | 2272 // Shared state read by the scavenge collector and set by ScavengeObject. |
2265 PromotionQueue promotion_queue_; | 2273 PromotionQueue promotion_queue_; |
2266 | 2274 |
2267 // Flag is set when the heap has been configured. The heap can be repeatedly | 2275 // Flag is set when the heap has been configured. The heap can be repeatedly |
2268 // configured through the API until it is set up. | 2276 // configured through the API until it is set up. |
2269 bool configured_; | 2277 bool configured_; |
2270 | 2278 |
2271 // Currently set GC flags that are respected by all GC components. | 2279 // Currently set GC flags that are respected by all GC components. |
2272 int current_gc_flags_; | 2280 int current_gc_flags_; |
2273 | 2281 |
(...skipping 23 matching lines...) | |
2297 | 2305 |
2298 // To be able to free memory held by ArrayBuffers during scavenge as well, we | 2306 // To be able to free memory held by ArrayBuffers during scavenge as well, we |
2299 // have a separate list of allocated memory held by ArrayBuffers in new space. | 2307 // have a separate list of allocated memory held by ArrayBuffers in new space. |
2300 // | 2308 // |
2301 // Since mark/compact also evacuates the new space, all pointers in the | 2309 // Since mark/compact also evacuates the new space, all pointers in the |
2302 // |live_array_buffers_for_scavenge_| list are also in the | 2310 // |live_array_buffers_for_scavenge_| list are also in the |
2303 // |live_array_buffers_| list. | 2311 // |live_array_buffers_| list. |
2304 std::map<void*, size_t> live_array_buffers_for_scavenge_; | 2312 std::map<void*, size_t> live_array_buffers_for_scavenge_; |
2305 std::map<void*, size_t> not_yet_discovered_array_buffers_for_scavenge_; | 2313 std::map<void*, size_t> not_yet_discovered_array_buffers_for_scavenge_; |
2306 | 2314 |
2307 struct StrongRootsList; | |
2308 StrongRootsList* strong_roots_list_; | 2315 StrongRootsList* strong_roots_list_; |
2309 | 2316 |
2310 friend class AlwaysAllocateScope; | 2317 friend class AlwaysAllocateScope; |
2311 friend class Bootstrapper; | 2318 friend class Bootstrapper; |
2312 friend class Deserializer; | 2319 friend class Deserializer; |
2313 friend class Factory; | 2320 friend class Factory; |
2314 friend class GCCallbacksScope; | 2321 friend class GCCallbacksScope; |
2315 friend class GCTracer; | 2322 friend class GCTracer; |
2316 friend class HeapIterator; | 2323 friend class HeapIterator; |
2317 friend class IncrementalMarking; | 2324 friend class IncrementalMarking; |
(...skipping 369 matching lines...) | |
2687 DisallowHeapAllocation no_allocation; // i.e. no gc allowed. | 2694 DisallowHeapAllocation no_allocation; // i.e. no gc allowed. |
2688 | 2695 |
2689 private: | 2696 private: |
2690 DISALLOW_IMPLICIT_CONSTRUCTORS(PathTracer); | 2697 DISALLOW_IMPLICIT_CONSTRUCTORS(PathTracer); |
2691 }; | 2698 }; |
2692 #endif // DEBUG | 2699 #endif // DEBUG |
2693 } | 2700 } |
2694 } // namespace v8::internal | 2701 } // namespace v8::internal |
2695 | 2702 |
2696 #endif // V8_HEAP_HEAP_H_ | 2703 #endif // V8_HEAP_HEAP_H_ |