| OLD | NEW | 
|     1 // Copyright 2012 the V8 project authors. All rights reserved. |     1 // Copyright 2012 the V8 project authors. All rights reserved. | 
|     2 // Use of this source code is governed by a BSD-style license that can be |     2 // Use of this source code is governed by a BSD-style license that can be | 
|     3 // found in the LICENSE file. |     3 // found in the LICENSE file. | 
|     4  |     4  | 
|     5 #ifndef V8_HEAP_HEAP_H_ |     5 #ifndef V8_HEAP_HEAP_H_ | 
|     6 #define V8_HEAP_HEAP_H_ |     6 #define V8_HEAP_HEAP_H_ | 
|     7  |     7  | 
|     8 #include <cmath> |     8 #include <cmath> | 
|     9 #include <map> |     9 #include <map> | 
|    10  |    10  | 
| (...skipping 559 matching lines...) |
|   570  |   570  | 
|   571  |   571  | 
|   572 enum ArrayStorageAllocationMode { |   572 enum ArrayStorageAllocationMode { | 
|   573   DONT_INITIALIZE_ARRAY_ELEMENTS, |   573   DONT_INITIALIZE_ARRAY_ELEMENTS, | 
|   574   INITIALIZE_ARRAY_ELEMENTS_WITH_HOLE |   574   INITIALIZE_ARRAY_ELEMENTS_WITH_HOLE | 
|   575 }; |   575 }; | 
|   576  |   576  | 
|   577  |   577  | 
|   578 class Heap { |   578 class Heap { | 
|   579  public: |   579  public: | 
|   580   // Configure heap size in MB before setup. Return false if the heap has been |   580   // Declare all the root indices.  This defines the root list order. | 
|   581   // set up already. |   581   enum RootListIndex { | 
|   582   bool ConfigureHeap(int max_semi_space_size, int max_old_space_size, |   582 #define ROOT_INDEX_DECLARATION(type, name, camel_name) k##camel_name##RootIndex, | 
|   583                      int max_executable_size, size_t code_range_size); |   583     STRONG_ROOT_LIST(ROOT_INDEX_DECLARATION) | 
|   584   bool ConfigureHeapDefault(); |   584 #undef ROOT_INDEX_DECLARATION | 
|   585  |   585  | 
|   586   // Prepares the heap, setting up memory areas that are needed in the isolate |   586 #define STRING_INDEX_DECLARATION(name, str) k##name##RootIndex, | 
|   587   // without actually creating any objects. |   587     INTERNALIZED_STRING_LIST(STRING_INDEX_DECLARATION) | 
|   588   bool SetUp(); |   588 #undef STRING_INDEX_DECLARATION | 
|   589  |   589  | 
|   590   // Bootstraps the object heap with the core set of objects required to run. |   590 #define SYMBOL_INDEX_DECLARATION(name) k##name##RootIndex, | 
|   591   // Returns whether it succeeded. |   591     PRIVATE_SYMBOL_LIST(SYMBOL_INDEX_DECLARATION) | 
|   592   bool CreateHeapObjects(); |   592 #undef SYMBOL_INDEX_DECLARATION | 
|   593  |   593  | 
|   594   // Destroys all memory allocated by the heap. |   594 #define SYMBOL_INDEX_DECLARATION(name, varname, description) k##name##RootIndex, | 
|   595   void TearDown(); |   595     PUBLIC_SYMBOL_LIST(SYMBOL_INDEX_DECLARATION) | 
 |   596 #undef SYMBOL_INDEX_DECLARATION | 
 |   597  | 
 |   598 // Utility type maps | 
 |   599 #define DECLARE_STRUCT_MAP(NAME, Name, name) k##Name##MapRootIndex, | 
 |   600     STRUCT_LIST(DECLARE_STRUCT_MAP) | 
 |   601 #undef DECLARE_STRUCT_MAP | 
 |   602     kStringTableRootIndex, | 
 |   603  | 
 |   604 #define ROOT_INDEX_DECLARATION(type, name, camel_name) k##camel_name##RootIndex, | 
 |   605     SMI_ROOT_LIST(ROOT_INDEX_DECLARATION) | 
 |   606 #undef ROOT_INDEX_DECLARATION | 
 |   607     kRootListLength, | 
 |   608     kStrongRootListLength = kStringTableRootIndex, | 
 |   609     kSmiRootsStart = kStringTableRootIndex + 1 | 
 |   610   }; | 
 |   611  | 
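For readers new to the X-macro pattern used in this enum, here is a minimal, self-contained sketch of how a ROOT_LIST-style macro expands into `k...RootIndex` constants. `MY_ROOT_LIST` and its two entries are made up for illustration; they are not the real STRONG_ROOT_LIST.

```cpp
// Illustrative only: a two-entry stand-in for STRONG_ROOT_LIST.
#define MY_ROOT_LIST(V)                 \
  V(Map, meta_map, MetaMap)             \
  V(Oddball, undefined_value, UndefinedValue)

enum MyRootListIndex {
#define ROOT_INDEX_DECLARATION(type, name, camel_name) k##camel_name##RootIndex,
  MY_ROOT_LIST(ROOT_INDEX_DECLARATION)  // kMetaMapRootIndex, kUndefinedValueRootIndex,
#undef ROOT_INDEX_DECLARATION
  kMyRootListLength                     // == 2: one past the last root index
};
```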
 |   612   // Indicates whether live bytes adjustment is triggered | 
 |   613   // - from within the GC code before sweeping started (SEQUENTIAL_TO_SWEEPER), | 
 |   614   // - or from within the GC code after sweeping started (CONCURRENT_TO_SWEEPER), | 
 |   615   // - or from mutator code (CONCURRENT_TO_SWEEPER). | 
 |   616   enum InvocationMode { SEQUENTIAL_TO_SWEEPER, CONCURRENT_TO_SWEEPER }; | 
 |   617  | 
 |   618   enum ScratchpadSlotMode { IGNORE_SCRATCHPAD_SLOT, RECORD_SCRATCHPAD_SLOT }; | 
 |   619  | 
 |   620   enum HeapState { NOT_IN_GC, SCAVENGE, MARK_COMPACT }; | 
 |   621  | 
 |   622   // ObjectStats are kept in two arrays, counts and sizes. Related stats are | 
 |   623   // stored in a contiguous linear buffer. Stats groups are stored one after | 
 |   624   // another. | 
 |   625   enum { | 
 |   626     FIRST_CODE_KIND_SUB_TYPE = LAST_TYPE + 1, | 
 |   627     FIRST_FIXED_ARRAY_SUB_TYPE = | 
 |   628         FIRST_CODE_KIND_SUB_TYPE + Code::NUMBER_OF_KINDS, | 
 |   629     FIRST_CODE_AGE_SUB_TYPE = | 
 |   630         FIRST_FIXED_ARRAY_SUB_TYPE + LAST_FIXED_ARRAY_SUB_TYPE + 1, | 
 |   631     OBJECT_STATS_COUNT = FIRST_CODE_AGE_SUB_TYPE + Code::kCodeAgeCount + 1 | 
 |   632   }; | 
 |   633  | 
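To make the buffer layout concrete, the sketch below shows where, under the scheme described above, a Code object of a given kind would be counted. The helper name is hypothetical; only the constant arithmetic comes from the enum.

```cpp
// Illustrative only: index of the stats slot for a Code object of `kind`,
// given that code-kind sub-types start right after the last instance type.
static int CodeKindStatsIndex(Code::Kind kind) {
  int index = Heap::FIRST_CODE_KIND_SUB_TYPE + static_cast<int>(kind);
  DCHECK(index < Heap::OBJECT_STATS_COUNT);
  return index;
}
```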
 |   634   // Taking this lock prevents the GC from entering a phase that relocates | 
 |   635   // object references. | 
 |   636   class RelocationLock { | 
 |   637    public: | 
 |   638     explicit RelocationLock(Heap* heap) : heap_(heap) { | 
 |   639       heap_->relocation_mutex_.Lock(); | 
 |   640     } | 
 |   641  | 
 |   642     ~RelocationLock() { heap_->relocation_mutex_.Unlock(); } | 
 |   643  | 
 |   644    private: | 
 |   645     Heap* heap_; | 
 |   646   }; | 
 |   647  | 
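As a usage sketch (a hypothetical call site, not code from this patch): because the lock is a plain RAII wrapper around `relocation_mutex_`, a mutator-side critical section only needs to construct it on the stack.

```cpp
// Hypothetical call site: while `lock` is alive the GC cannot enter a
// relocating phase, so raw object addresses observed here stay valid.
void CompareRawAddresses(Heap* heap, HeapObject* a, HeapObject* b) {
  Heap::RelocationLock lock(heap);  // acquires relocation_mutex_
  bool same = (a->address() == b->address());
  USE(same);
}  // destructor releases the mutex on scope exit
```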
 |   648   // An optional version of the above lock that can be used for some critical | 
 |   649   // sections on the mutator thread; only safe since the GC currently does not | 
 |   650   // do concurrent compaction. | 
 |   651   class OptionalRelocationLock { | 
 |   652    public: | 
 |   653     OptionalRelocationLock(Heap* heap, bool concurrent) | 
 |   654         : heap_(heap), concurrent_(concurrent) { | 
 |   655       if (concurrent_) heap_->relocation_mutex_.Lock(); | 
 |   656     } | 
 |   657  | 
 |   658     ~OptionalRelocationLock() { | 
 |   659       if (concurrent_) heap_->relocation_mutex_.Unlock(); | 
 |   660     } | 
 |   661  | 
 |   662    private: | 
 |   663     Heap* heap_; | 
 |   664     bool concurrent_; | 
 |   665   }; | 
 |   666  | 
 |   667   // Support for partial snapshots.  After calling this we have a linear | 
 |   668   // space to write objects in each space. | 
 |   669   struct Chunk { | 
 |   670     uint32_t size; | 
 |   671     Address start; | 
 |   672     Address end; | 
 |   673   }; | 
 |   674   typedef List<Chunk> Reservation; | 
 |   675  | 
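To illustrate how a `Reservation` is meant to be consumed (a hedged sketch; this loop is hypothetical and not the deserializer's actual code): each chunk describes one linear area into which objects can be written back to back.

```cpp
// Hypothetical consumer of a Heap::Reservation (one list per space).
void WalkReservation(const Heap::Reservation& reservation) {
  for (int i = 0; i < reservation.length(); i++) {
    const Heap::Chunk& chunk = reservation[i];
    // chunk.size bytes of linear allocation space, [chunk.start, chunk.end).
    DCHECK_EQ(chunk.size, static_cast<uint32_t>(chunk.end - chunk.start));
  }
}
```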
 |   676   static const intptr_t kMinimumOldGenerationAllocationLimit = | 
 |   677       8 * (Page::kPageSize > MB ? Page::kPageSize : MB); | 
 |   678  | 
 |   679   static const int kInitalOldGenerationLimitFactor = 2; | 
 |   680  | 
 |   681 #if V8_OS_ANDROID | 
 |   682   // Don't apply pointer multiplier on Android since it has no swap space and | 
 |   683   // should instead adapt its heap size based on available physical memory. | 
 |   684   static const int kPointerMultiplier = 1; | 
 |   685 #else | 
 |   686   static const int kPointerMultiplier = i::kPointerSize / 4; | 
 |   687 #endif | 
 |   688  | 
 |   689   // The new space size has to be a power of 2. Sizes are in MB. | 
 |   690   static const int kMaxSemiSpaceSizeLowMemoryDevice = 1 * kPointerMultiplier; | 
 |   691   static const int kMaxSemiSpaceSizeMediumMemoryDevice = 4 * kPointerMultiplier; | 
 |   692   static const int kMaxSemiSpaceSizeHighMemoryDevice = 8 * kPointerMultiplier; | 
 |   693   static const int kMaxSemiSpaceSizeHugeMemoryDevice = 8 * kPointerMultiplier; | 
 |   694  | 
 |   695   // The old space size has to be a multiple of Page::kPageSize. | 
 |   696   // Sizes are in MB. | 
 |   697   static const int kMaxOldSpaceSizeLowMemoryDevice = 128 * kPointerMultiplier; | 
 |   698   static const int kMaxOldSpaceSizeMediumMemoryDevice = | 
 |   699       256 * kPointerMultiplier; | 
 |   700   static const int kMaxOldSpaceSizeHighMemoryDevice = 512 * kPointerMultiplier; | 
 |   701   static const int kMaxOldSpaceSizeHugeMemoryDevice = 700 * kPointerMultiplier; | 
 |   702  | 
 |   703   // The executable size has to be a multiple of Page::kPageSize. | 
 |   704   // Sizes are in MB. | 
 |   705   static const int kMaxExecutableSizeLowMemoryDevice = 96 * kPointerMultiplier; | 
 |   706   static const int kMaxExecutableSizeMediumMemoryDevice = | 
 |   707       192 * kPointerMultiplier; | 
 |   708   static const int kMaxExecutableSizeHighMemoryDevice = | 
 |   709       256 * kPointerMultiplier; | 
 |   710   static const int kMaxExecutableSizeHugeMemoryDevice = | 
 |   711       256 * kPointerMultiplier; | 
 |   712  | 
 |   713   static const int kTraceRingBufferSize = 512; | 
 |   714   static const int kStacktraceBufferSize = 512; | 
 |   715  | 
 |   716   static const double kMinHeapGrowingFactor; | 
 |   717   static const double kMaxHeapGrowingFactor; | 
 |   718   static const double kMaxHeapGrowingFactorMemoryConstrained; | 
 |   719   static const double kMaxHeapGrowingFactorIdle; | 
 |   720   static const double kTargetMutatorUtilization; | 
 |   721  | 
 |   722   // Sloppy mode arguments object size. | 
 |   723   static const int kSloppyArgumentsObjectSize = | 
 |   724       JSObject::kHeaderSize + 2 * kPointerSize; | 
 |   725  | 
 |   726   // Strict mode arguments have no callee, so the object is smaller. | 
 |   727   static const int kStrictArgumentsObjectSize = | 
 |   728       JSObject::kHeaderSize + 1 * kPointerSize; | 
 |   729  | 
 |   730   // Indices for direct access into argument objects. | 
 |   731   static const int kArgumentsLengthIndex = 0; | 
 |   732  | 
 |   733   // callee is only valid in sloppy mode. | 
 |   734   static const int kArgumentsCalleeIndex = 1; | 
 |   735  | 
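The two object sizes line up with the two index constants: a sloppy arguments object has both in-object fields (length and callee), a strict one only the length. The checks below just restate that arithmetic; they are illustrative, not part of the patch.

```cpp
// Illustrative consistency checks for the layout arithmetic above.
STATIC_ASSERT(Heap::kSloppyArgumentsObjectSize ==
              JSObject::kHeaderSize +
                  (Heap::kArgumentsCalleeIndex + 1) * kPointerSize);
STATIC_ASSERT(Heap::kStrictArgumentsObjectSize ==
              JSObject::kHeaderSize +
                  (Heap::kArgumentsLengthIndex + 1) * kPointerSize);
```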
 |   736   static const int kNoGCFlags = 0; | 
 |   737   static const int kReduceMemoryFootprintMask = 1; | 
 |   738   static const int kAbortIncrementalMarkingMask = 2; | 
 |   739   static const int kFinalizeIncrementalMarkingMask = 4; | 
 |   740  | 
 |   741   // Making the heap iterable requires us to abort incremental marking. | 
 |   742   static const int kMakeHeapIterableMask = kAbortIncrementalMarkingMask; | 
 |   743  | 
 |   744   // The roots that have an index less than this are always in old space. | 
 |   745   static const int kOldSpaceRoots = 0x20; | 
 |   746  | 
 |   747   STATIC_ASSERT(kUndefinedValueRootIndex == | 
 |   748                 Internals::kUndefinedValueRootIndex); | 
 |   749   STATIC_ASSERT(kNullValueRootIndex == Internals::kNullValueRootIndex); | 
 |   750   STATIC_ASSERT(kTrueValueRootIndex == Internals::kTrueValueRootIndex); | 
 |   751   STATIC_ASSERT(kFalseValueRootIndex == Internals::kFalseValueRootIndex); | 
 |   752   STATIC_ASSERT(kempty_stringRootIndex == Internals::kEmptyStringRootIndex); | 
 |   753  | 
 |   754   // Calculates the maximum amount of filler that could be required by the | 
 |   755   // given alignment. | 
 |   756   static int GetMaximumFillToAlign(AllocationAlignment alignment); | 
 |   757   // Calculates the actual amount of filler required for a given address at the | 
 |   758   // given alignment. | 
 |   759   static int GetFillToAlign(Address address, AllocationAlignment alignment); | 
 |   760  | 
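A hedged sketch of what these two helpers compute, assuming a simple power-of-two byte alignment instead of the `AllocationAlignment` enum (illustrative, not the V8 implementation):

```cpp
// Illustrative only: filler bytes needed so that (address + filler) is
// aligned to `alignment` bytes, plus the worst case over all addresses
// that are already pointer-size aligned.
static int FillToAlignSketch(uintptr_t address, int alignment) {
  int misalignment = static_cast<int>(address & (alignment - 1));
  return misalignment == 0 ? 0 : alignment - misalignment;
}
static int MaximumFillToAlignSketch(int alignment) {
  return alignment - kPointerSize;  // e.g. kDoubleSize - kPointerSize on 32 bit
}
```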
 |   761   template <typename T> | 
 |   762   static inline bool IsOneByte(T t, int chars); | 
 |   763  | 
 |   764   // Callback function passed to Heap::Iterate etc.  Copies an object if | 
 |   765   // necessary; the object might be promoted to an old space.  The caller must | 
 |   766   // ensure the precondition that the object is (a) a heap object and (b) in | 
 |   767   // the heap's from space. | 
 |   768   static inline void ScavengePointer(HeapObject** p); | 
 |   769   static inline void ScavengeObject(HeapObject** p, HeapObject* object); | 
 |   770  | 
 |   771   // Slow part of scavenge object. | 
 |   772   static void ScavengeObjectSlow(HeapObject** p, HeapObject* object); | 
 |   773  | 
 |   774   static void FatalProcessOutOfMemory(const char* location, | 
 |   775                                       bool take_snapshot = false); | 
 |   776  | 
 |   777   static bool RootIsImmortalImmovable(int root_index); | 
 |   778  | 
 |   779   // Checks whether the space is valid. | 
 |   780   static bool IsValidAllocationSpace(AllocationSpace space); | 
 |   781  | 
 |   782   // An object may have an AllocationSite associated with it through a trailing | 
 |   783   // AllocationMemento. Its feedback should be updated when objects are found | 
 |   784   // in the heap. | 
 |   785   static inline void UpdateAllocationSiteFeedback(HeapObject* object, | 
 |   786                                                   ScratchpadSlotMode mode); | 
 |   787  | 
 |   788   // Generated code can embed direct references to non-writable roots if | 
 |   789   // they are in new space. | 
 |   790   static bool RootCanBeWrittenAfterInitialization(RootListIndex root_index); | 
 |   791  | 
 |   792   // Zapping is needed for verify heap, and always done in debug builds. | 
 |   793   static inline bool ShouldZapGarbage() { | 
 |   794 #ifdef DEBUG | 
 |   795     return true; | 
 |   796 #else | 
 |   797 #ifdef VERIFY_HEAP | 
 |   798     return FLAG_verify_heap; | 
 |   799 #else | 
 |   800     return false; | 
 |   801 #endif | 
 |   802 #endif | 
 |   803   } | 
 |   804  | 
 |   805   static double HeapGrowingFactor(double gc_speed, double mutator_speed); | 
 |   806  | 
 |   807   // Copy block of memory from src to dst. Size of block should be aligned | 
 |   808   // by pointer size. | 
 |   809   static inline void CopyBlock(Address dst, Address src, int byte_size); | 
 |   810  | 
 |   811   // Optimized version of memmove for blocks with pointer size aligned sizes and | 
 |   812   // pointer size aligned addresses. | 
 |   813   static inline void MoveBlock(Address dst, Address src, int byte_size); | 
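A minimal sketch of the contract stated in these comments (illustrative, not the V8 implementation): because both the size and the addresses are pointer-size aligned, the copy can proceed one word at a time.

```cpp
// Illustrative word-wise copy; assumes dst, src and byte_size are all
// pointer-size aligned, as the CopyBlock/MoveBlock comments require.
static void CopyBlockSketch(Address dst, Address src, int byte_size) {
  DCHECK_EQ(0, byte_size % kPointerSize);
  Object** dst_slot = reinterpret_cast<Object**>(dst);
  Object** src_slot = reinterpret_cast<Object**>(src);
  for (int i = 0; i < byte_size / kPointerSize; i++) {
    dst_slot[i] = src_slot[i];
  }
}
```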
|   596  |   814  | 
|   597   // Set the stack limit in the roots_ array.  Some architectures generate |   815   // Set the stack limit in the roots_ array.  Some architectures generate | 
|   598   // code that looks here, because it is faster than loading from the static |   816   // code that looks here, because it is faster than loading from the static | 
|   599   // jslimit_/real_jslimit_ variable in the StackGuard. |   817   // jslimit_/real_jslimit_ variable in the StackGuard. | 
|   600   void SetStackLimits(); |   818   void SetStackLimits(); | 
|   601  |   819  | 
|   602   // Notifies the heap that it is ok to start marking or other activities that |   820   // Notifies the heap that it is ok to start marking or other activities that | 
|   603   // should not happen during deserialization. |   821   // should not happen during deserialization. | 
|   604   void NotifyDeserializationComplete(); |   822   void NotifyDeserializationComplete(); | 
|   605  |   823  | 
|   606   // Returns whether SetUp has been called. |   824   // Returns whether SetUp has been called. | 
|   607   bool HasBeenSetUp(); |   825   bool HasBeenSetUp(); | 
|   608  |   826  | 
|   609   // Returns the maximum amount of memory reserved for the heap.  For |  | 
|   610   // the young generation, we reserve 4 times the amount needed for a |  | 
|   611   // semi space.  The young generation consists of two semi spaces and |  | 
|   612   // we reserve twice the amount needed for those in order to ensure |  | 
|   613   // that new space can be aligned to its size. |  | 
|   614   intptr_t MaxReserved() { |  | 
|   615     return 4 * reserved_semispace_size_ + max_old_generation_size_; |  | 
|   616   } |  | 
|   617   int MaxSemiSpaceSize() { return max_semi_space_size_; } |  | 
|   618   int ReservedSemiSpaceSize() { return reserved_semispace_size_; } |  | 
|   619   int InitialSemiSpaceSize() { return initial_semispace_size_; } |  | 
|   620   int TargetSemiSpaceSize() { return target_semispace_size_; } |  | 
|   621   intptr_t MaxOldGenerationSize() { return max_old_generation_size_; } |  | 
|   622   intptr_t MaxExecutableSize() { return max_executable_size_; } |  | 
|   623  |  | 
|   624   // Returns the capacity of the heap in bytes w/o growing. Heap grows when |  | 
|   625   // more spaces are needed until it reaches the limit. |  | 
|   626   intptr_t Capacity(); |  | 
|   627  |  | 
|   628   // Returns the amount of memory currently committed for the heap. |  | 
|   629   intptr_t CommittedMemory(); |  | 
|   630  |  | 
|   631   // Returns the amount of memory currently committed for the old space. |  | 
|   632   intptr_t CommittedOldGenerationMemory(); |  | 
|   633  |  | 
|   634   // Returns the amount of executable memory currently committed for the heap. |  | 
|   635   intptr_t CommittedMemoryExecutable(); |  | 
|   636  |  | 
|   637   // Returns the amount of phyical memory currently committed for the heap. |  | 
|   638   size_t CommittedPhysicalMemory(); |  | 
|   639  |  | 
|   640   // Returns the maximum amount of memory ever committed for the heap. |  | 
|   641   intptr_t MaximumCommittedMemory() { return maximum_committed_; } |  | 
|   642  |  | 
|   643   // Updates the maximum committed memory for the heap. Should be called |  | 
|   644   // whenever a space grows. |  | 
|   645   void UpdateMaximumCommitted(); |  | 
|   646  |  | 
|   647   // Returns the available bytes in space w/o growing. |  | 
|   648   // Heap doesn't guarantee that it can allocate an object that requires |  | 
|   649   // all available bytes. Check MaxHeapObjectSize() instead. |  | 
|   650   intptr_t Available(); |  | 
|   651  |  | 
|   652   // Returns of size of all objects residing in the heap. |  | 
|   653   intptr_t SizeOfObjects(); |  | 
|   654  |  | 
|   655   intptr_t old_generation_allocation_limit() const { |   827   intptr_t old_generation_allocation_limit() const { | 
|   656     return old_generation_allocation_limit_; |   828     return old_generation_allocation_limit_; | 
|   657   } |   829   } | 
|   658  |   830  | 
|   659   // Return the starting address and a mask for the new space.  And-masking an |  | 
|   660   // address with the mask will result in the start address of the new space |  | 
|   661   // for all addresses in either semispace. |  | 
|   662   Address NewSpaceStart() { return new_space_.start(); } |  | 
|   663   uintptr_t NewSpaceMask() { return new_space_.mask(); } |  | 
|   664   Address NewSpaceTop() { return new_space_.top(); } |  | 
|   665  |  | 
|   666   NewSpace* new_space() { return &new_space_; } |  | 
|   667   OldSpace* old_space() { return old_space_; } |  | 
|   668   OldSpace* code_space() { return code_space_; } |  | 
|   669   MapSpace* map_space() { return map_space_; } |  | 
|   670   LargeObjectSpace* lo_space() { return lo_space_; } |  | 
|   671   PagedSpace* paged_space(int idx) { |  | 
|   672     switch (idx) { |  | 
|   673       case OLD_SPACE: |  | 
|   674         return old_space(); |  | 
|   675       case MAP_SPACE: |  | 
|   676         return map_space(); |  | 
|   677       case CODE_SPACE: |  | 
|   678         return code_space(); |  | 
|   679       case NEW_SPACE: |  | 
|   680       case LO_SPACE: |  | 
|   681         UNREACHABLE(); |  | 
|   682     } |  | 
|   683     return NULL; |  | 
|   684   } |  | 
|   685   Space* space(int idx) { |  | 
|   686     switch (idx) { |  | 
|   687       case NEW_SPACE: |  | 
|   688         return new_space(); |  | 
|   689       case LO_SPACE: |  | 
|   690         return lo_space(); |  | 
|   691       default: |  | 
|   692         return paged_space(idx); |  | 
|   693     } |  | 
|   694   } |  | 
|   695  |  | 
|   696   // Returns name of the space. |  | 
|   697   const char* GetSpaceName(int idx); |  | 
|   698  |  | 
|   699   bool always_allocate() { return always_allocate_scope_depth_ != 0; } |   831   bool always_allocate() { return always_allocate_scope_depth_ != 0; } | 
|   700   Address always_allocate_scope_depth_address() { |   832   Address always_allocate_scope_depth_address() { | 
|   701     return reinterpret_cast<Address>(&always_allocate_scope_depth_); |   833     return reinterpret_cast<Address>(&always_allocate_scope_depth_); | 
|   702   } |   834   } | 
|   703  |   835  | 
|   704   Address* NewSpaceAllocationTopAddress() { |   836   Address* NewSpaceAllocationTopAddress() { | 
|   705     return new_space_.allocation_top_address(); |   837     return new_space_.allocation_top_address(); | 
|   706   } |   838   } | 
|   707   Address* NewSpaceAllocationLimitAddress() { |   839   Address* NewSpaceAllocationLimitAddress() { | 
|   708     return new_space_.allocation_limit_address(); |   840     return new_space_.allocation_limit_address(); | 
|   709   } |   841   } | 
|   710  |   842  | 
|   711   Address* OldSpaceAllocationTopAddress() { |   843   Address* OldSpaceAllocationTopAddress() { | 
|   712     return old_space_->allocation_top_address(); |   844     return old_space_->allocation_top_address(); | 
|   713   } |   845   } | 
|   714   Address* OldSpaceAllocationLimitAddress() { |   846   Address* OldSpaceAllocationLimitAddress() { | 
|   715     return old_space_->allocation_limit_address(); |   847     return old_space_->allocation_limit_address(); | 
|   716   } |   848   } | 
|   717  |   849  | 
|   718   // TODO(hpayer): There is still a mismatch between capacity and actual |   850   // TODO(hpayer): There is still a mismatch between capacity and actual | 
|   719   // committed memory size. |   851   // committed memory size. | 
|   720   bool CanExpandOldGeneration(int size) { |   852   bool CanExpandOldGeneration(int size) { | 
|   721     return (CommittedOldGenerationMemory() + size) < MaxOldGenerationSize(); |   853     return (CommittedOldGenerationMemory() + size) < MaxOldGenerationSize(); | 
|   722   } |   854   } | 
|   723  |   855  | 
|   724   // Returns a deep copy of the JavaScript object. |  | 
|   725   // Properties and elements are copied too. |  | 
|   726   // Optionally takes an AllocationSite to be appended in an AllocationMemento. |  | 
|   727   MUST_USE_RESULT AllocationResult |  | 
|   728       CopyJSObject(JSObject* source, AllocationSite* site = NULL); |  | 
|   729  |  | 
|   730   // Calculates the maximum amount of filler that could be required by the |  | 
|   731   // given alignment. |  | 
|   732   static int GetMaximumFillToAlign(AllocationAlignment alignment); |  | 
|   733   // Calculates the actual amount of filler required for a given address at the |  | 
|   734   // given alignment. |  | 
|   735   static int GetFillToAlign(Address address, AllocationAlignment alignment); |  | 
|   736  |  | 
|   737   // Creates a filler object and returns a heap object immediately after it. |  | 
|   738   MUST_USE_RESULT HeapObject* PrecedeWithFiller(HeapObject* object, |  | 
|   739                                                 int filler_size); |  | 
|   740   // Creates a filler object if needed for alignment and returns a heap object |  | 
|   741   // immediately after it. If any space is left after the returned object, |  | 
|   742   // another filler object is created so the over allocated memory is iterable. |  | 
|   743   MUST_USE_RESULT HeapObject* AlignWithFiller(HeapObject* object, |  | 
|   744                                               int object_size, |  | 
|   745                                               int allocation_size, |  | 
|   746                                               AllocationAlignment alignment); |  | 
|   747  |  | 
|   748   // Clear the Instanceof cache (used when a prototype changes). |   856   // Clear the Instanceof cache (used when a prototype changes). | 
|   749   inline void ClearInstanceofCache(); |   857   inline void ClearInstanceofCache(); | 
|   750  |   858  | 
|   751   // Iterates the whole code space to clear all ICs of the given kind. |   859   // Iterates the whole code space to clear all ICs of the given kind. | 
|   752   void ClearAllICsByKind(Code::Kind kind); |   860   void ClearAllICsByKind(Code::Kind kind); | 
|   753  |   861  | 
|   754   // FreeSpace objects have a null map after deserialization. Update the map. |   862   // FreeSpace objects have a null map after deserialization. Update the map. | 
|   755   void RepairFreeListsAfterDeserialization(); |   863   void RepairFreeListsAfterDeserialization(); | 
|   756  |   864  | 
|   757   template <typename T> |  | 
|   758   static inline bool IsOneByte(T t, int chars); |  | 
|   759  |  | 
|   760   // Move len elements within a given array from src_index index to dst_index |   865   // Move len elements within a given array from src_index index to dst_index | 
|   761   // index. |   866   // index. | 
|   762   void MoveElements(FixedArray* array, int dst_index, int src_index, int len); |   867   void MoveElements(FixedArray* array, int dst_index, int src_index, int len); | 
|   763  |   868  | 
|   764   // Sloppy mode arguments object size. |  | 
|   765   static const int kSloppyArgumentsObjectSize = |  | 
|   766       JSObject::kHeaderSize + 2 * kPointerSize; |  | 
|   767   // Strict mode arguments has no callee so it is smaller. |  | 
|   768   static const int kStrictArgumentsObjectSize = |  | 
|   769       JSObject::kHeaderSize + 1 * kPointerSize; |  | 
|   770   // Indicies for direct access into argument objects. |  | 
|   771   static const int kArgumentsLengthIndex = 0; |  | 
|   772   // callee is only valid in sloppy mode. |  | 
|   773   static const int kArgumentsCalleeIndex = 1; |  | 
|   774  |  | 
|   775   // Finalizes an external string by deleting the associated external |   869   // Finalizes an external string by deleting the associated external | 
|   776   // data and clearing the resource pointer. |   870   // data and clearing the resource pointer. | 
|   777   inline void FinalizeExternalString(String* string); |   871   inline void FinalizeExternalString(String* string); | 
|   778  |   872  | 
|   779   // Initialize a filler object to keep the ability to iterate over the heap |   873   // Initialize a filler object to keep the ability to iterate over the heap | 
|   780   // when introducing gaps within pages. |   874   // when introducing gaps within pages. | 
|   781   void CreateFillerObjectAt(Address addr, int size); |   875   void CreateFillerObjectAt(Address addr, int size); | 
|   782  |   876  | 
|   783   bool CanMoveObjectStart(HeapObject* object); |   877   bool CanMoveObjectStart(HeapObject* object); | 
|   784  |   878  | 
|   785   // Indicates whether live bytes adjustment is triggered |  | 
|   786   // - from within the GC code before sweeping started (SEQUENTIAL_TO_SWEEPER), |  | 
|   787   // - or from within GC (CONCURRENT_TO_SWEEPER), |  | 
|   788   // - or mutator code (CONCURRENT_TO_SWEEPER). |  | 
|   789   enum InvocationMode { SEQUENTIAL_TO_SWEEPER, CONCURRENT_TO_SWEEPER }; |  | 
|   790  |  | 
|   791   // Maintain consistency of live bytes during incremental marking. |   879   // Maintain consistency of live bytes during incremental marking. | 
|   792   void AdjustLiveBytes(HeapObject* object, int by, InvocationMode mode); |   880   void AdjustLiveBytes(HeapObject* object, int by, InvocationMode mode); | 
|   793  |   881  | 
|   794   // Trim the given array from the left. Note that this relocates the object |   882   // Trim the given array from the left. Note that this relocates the object | 
|   795   // start and hence is only valid if there is only a single reference to it. |   883   // start and hence is only valid if there is only a single reference to it. | 
|   796   FixedArrayBase* LeftTrimFixedArray(FixedArrayBase* obj, int elements_to_trim); |   884   FixedArrayBase* LeftTrimFixedArray(FixedArrayBase* obj, int elements_to_trim); | 
|   797  |   885  | 
|   798   // Trim the given array from the right. |   886   // Trim the given array from the right. | 
|   799   template<Heap::InvocationMode mode> |   887   template<Heap::InvocationMode mode> | 
|   800   void RightTrimFixedArray(FixedArrayBase* obj, int elements_to_trim); |   888   void RightTrimFixedArray(FixedArrayBase* obj, int elements_to_trim); | 
|   801  |   889  | 
|   802   // Converts the given boolean condition to JavaScript boolean value. |   890   // Converts the given boolean condition to JavaScript boolean value. | 
|   803   inline Object* ToBoolean(bool condition); |   891   inline Object* ToBoolean(bool condition); | 
|   804  |   892  | 
|   805   // Performs garbage collection operation. |  | 
|   806   // Returns whether there is a chance that another major GC could |  | 
|   807   // collect more garbage. |  | 
|   808   inline bool CollectGarbage( |  | 
|   809       AllocationSpace space, const char* gc_reason = NULL, |  | 
|   810       const GCCallbackFlags gc_callback_flags = kNoGCCallbackFlags); |  | 
|   811  |  | 
|   812   static const int kNoGCFlags = 0; |  | 
|   813   static const int kReduceMemoryFootprintMask = 1; |  | 
|   814   static const int kAbortIncrementalMarkingMask = 2; |  | 
|   815   static const int kFinalizeIncrementalMarkingMask = 4; |  | 
|   816  |  | 
|   817   // Making the heap iterable requires us to abort incremental marking. |  | 
|   818   static const int kMakeHeapIterableMask = kAbortIncrementalMarkingMask; |  | 
|   819  |  | 
|   820   // Invoked when GC was requested via the stack guard. |  | 
|   821   void HandleGCRequest(); |  | 
|   822  |  | 
|   823   // Attempt to over-approximate the weak closure by marking object groups and |   893   // Attempt to over-approximate the weak closure by marking object groups and | 
|   824   // implicit references from global handles, but don't atomically complete |   894   // implicit references from global handles, but don't atomically complete | 
|   825   // marking. If we continue to mark incrementally, we might have marked |   895   // marking. If we continue to mark incrementally, we might have marked | 
|   826   // objects that die later. |   896   // objects that die later. | 
|   827   void OverApproximateWeakClosure(const char* gc_reason); |   897   void OverApproximateWeakClosure(const char* gc_reason); | 
|   828  |   898  | 
|   829   // Performs a full garbage collection.  If (flags & kMakeHeapIterableMask) is |  | 
|   830   // non-zero, then the slower precise sweeper is used, which leaves the heap |  | 
|   831   // in a state where we can iterate over the heap visiting all objects. |  | 
|   832   void CollectAllGarbage( |  | 
|   833       int flags = kFinalizeIncrementalMarkingMask, const char* gc_reason = NULL, |  | 
|   834       const GCCallbackFlags gc_callback_flags = kNoGCCallbackFlags); |  | 
|   835  |  | 
|   836   // Last hope GC, should try to squeeze as much as possible. |  | 
|   837   void CollectAllAvailableGarbage(const char* gc_reason = NULL); |  | 
|   838  |  | 
|   839   // Check whether the heap is currently iterable. |   899   // Check whether the heap is currently iterable. | 
|   840   bool IsHeapIterable(); |   900   bool IsHeapIterable(); | 
|   841  |   901  | 
|   842   // Notify the heap that a context has been disposed. |   902   // Notify the heap that a context has been disposed. | 
|   843   int NotifyContextDisposed(bool dependant_context); |   903   int NotifyContextDisposed(bool dependant_context); | 
|   844  |   904  | 
|   845   // Start incremental marking and ensure that idle time handler can perform |  | 
|   846   // incremental steps. |  | 
|   847   void StartIdleIncrementalMarking(); |  | 
|   848  |  | 
|   849   // Starts incremental marking assuming incremental marking is currently |  | 
|   850   // stopped. |  | 
|   851   void StartIncrementalMarking(int gc_flags, |  | 
|   852                                const GCCallbackFlags gc_callback_flags, |  | 
|   853                                const char* reason = nullptr); |  | 
|   854  |  | 
|   855   // Performs incremental marking steps of step_size_in_bytes as long as |  | 
|   856   // deadline_ins_ms is not reached. step_size_in_bytes can be 0 to compute |  | 
|   857   // an estimate increment. Returns the remaining time that cannot be used |  | 
|   858   // for incremental marking anymore because a single step would exceed the |  | 
|   859   // deadline. |  | 
|   860   double AdvanceIncrementalMarking( |  | 
|   861       intptr_t step_size_in_bytes, double deadline_in_ms, |  | 
|   862       IncrementalMarking::StepActions step_actions); |  | 
|   863  |  | 
|   864   void FinalizeIncrementalMarkingIfComplete(const char* comment); |   905   void FinalizeIncrementalMarkingIfComplete(const char* comment); | 
|   865  |   906  | 
|   866   inline void increment_scan_on_scavenge_pages() { |   907   inline void increment_scan_on_scavenge_pages() { | 
|   867     scan_on_scavenge_pages_++; |   908     scan_on_scavenge_pages_++; | 
|   868     if (FLAG_gc_verbose) { |   909     if (FLAG_gc_verbose) { | 
|   869       PrintF("Scan-on-scavenge pages: %d\n", scan_on_scavenge_pages_); |   910       PrintF("Scan-on-scavenge pages: %d\n", scan_on_scavenge_pages_); | 
|   870     } |   911     } | 
|   871   } |   912   } | 
|   872  |   913  | 
|   873   inline void decrement_scan_on_scavenge_pages() { |   914   inline void decrement_scan_on_scavenge_pages() { | 
|   874     scan_on_scavenge_pages_--; |   915     scan_on_scavenge_pages_--; | 
|   875     if (FLAG_gc_verbose) { |   916     if (FLAG_gc_verbose) { | 
|   876       PrintF("Scan-on-scavenge pages: %d\n", scan_on_scavenge_pages_); |   917       PrintF("Scan-on-scavenge pages: %d\n", scan_on_scavenge_pages_); | 
|   877     } |   918     } | 
|   878   } |   919   } | 
|   879  |   920  | 
|   880   PromotionQueue* promotion_queue() { return &promotion_queue_; } |  | 
|   881  |  | 
|   882   void AddGCPrologueCallback(v8::Isolate::GCCallback callback, |  | 
|   883                              GCType gc_type_filter, bool pass_isolate = true); |  | 
|   884   void RemoveGCPrologueCallback(v8::Isolate::GCCallback callback); |  | 
|   885  |  | 
|   886   void AddGCEpilogueCallback(v8::Isolate::GCCallback callback, |  | 
|   887                              GCType gc_type_filter, bool pass_isolate = true); |  | 
|   888   void RemoveGCEpilogueCallback(v8::Isolate::GCCallback callback); |  | 
|   889  |  | 
|   890 // Heap root getters.  We have versions with and without type::cast() here. |   921 // Heap root getters.  We have versions with and without type::cast() here. | 
|   891 // You can't use type::cast during GC because the assert fails. |   922 // You can't use type::cast during GC because the assert fails. | 
|   892 // TODO(1490): Try removing the unchecked accessors, now that GC marking does |   923 // TODO(1490): Try removing the unchecked accessors, now that GC marking does | 
|   893 // not corrupt the map. |   924 // not corrupt the map. | 
|   894 #define ROOT_ACCESSOR(type, name, camel_name)                         \ |   925 #define ROOT_ACCESSOR(type, name, camel_name)                         \ | 
|   895   inline type* name();                                                \ |   926   inline type* name();                                                \ | 
|   896   type* raw_unchecked_##name() {                                      \ |   927   type* raw_unchecked_##name() {                                      \ | 
|   897     return reinterpret_cast<type*>(roots_[k##camel_name##RootIndex]); \ |   928     return reinterpret_cast<type*>(roots_[k##camel_name##RootIndex]); \ | 
|   898   } |   929   } | 
|   899   ROOT_LIST(ROOT_ACCESSOR) |   930   ROOT_LIST(ROOT_ACCESSOR) | 
| (...skipping 37 matching lines...) |
|   937   } |   968   } | 
|   938  |   969  | 
|   939   void set_encountered_weak_cells(Object* weak_cell) { |   970   void set_encountered_weak_cells(Object* weak_cell) { | 
|   940     encountered_weak_cells_ = weak_cell; |   971     encountered_weak_cells_ = weak_cell; | 
|   941   } |   972   } | 
|   942   Object* encountered_weak_cells() const { return encountered_weak_cells_; } |   973   Object* encountered_weak_cells() const { return encountered_weak_cells_; } | 
|   943  |   974  | 
|   944   // Number of mark-sweeps. |   975   // Number of mark-sweeps. | 
|   945   int ms_count() const { return ms_count_; } |   976   int ms_count() const { return ms_count_; } | 
|   946  |   977  | 
|   947   // Iterates over all roots in the heap. |  | 
|   948   void IterateRoots(ObjectVisitor* v, VisitMode mode); |  | 
|   949   // Iterates over all strong roots in the heap. |  | 
|   950   void IterateStrongRoots(ObjectVisitor* v, VisitMode mode); |  | 
|   951   // Iterates over entries in the smi roots list.  Only interesting to the |  | 
|   952   // serializer/deserializer, since GC does not care about smis. |  | 
|   953   void IterateSmiRoots(ObjectVisitor* v); |  | 
|   954   // Iterates over all the other roots in the heap. |  | 
|   955   void IterateWeakRoots(ObjectVisitor* v, VisitMode mode); |  | 
|   956  |  | 
|   957   // Iterate pointers to from semispace of new space found in memory interval |  | 
|   958   // from start to end within |object|. |  | 
|   959   void IterateAndMarkPointersToFromSpace(HeapObject* object, Address start, |  | 
|   960                                          Address end, bool record_slots, |  | 
|   961                                          ObjectSlotCallback callback); |  | 
|   962  |  | 
|   963   // Returns whether the object resides in new space. |  | 
|   964   inline bool InNewSpace(Object* object); |  | 
|   965   inline bool InNewSpace(Address address); |  | 
|   966   inline bool InNewSpacePage(Address address); |  | 
|   967   inline bool InFromSpace(Object* object); |  | 
|   968   inline bool InToSpace(Object* object); |  | 
|   969  |  | 
|   970   // Returns whether the object resides in old space. |  | 
|   971   inline bool InOldSpace(Address address); |  | 
|   972   inline bool InOldSpace(Object* object); |  | 
|   973  |  | 
|   974   // Checks whether an address/object in the heap (including auxiliary |  | 
|   975   // area and unused area). |  | 
|   976   bool Contains(Address addr); |  | 
|   977   bool Contains(HeapObject* value); |  | 
|   978  |  | 
|   979   // Checks whether an address/object in a space. |  | 
|   980   // Currently used by tests, serialization and heap verification only. |  | 
|   981   bool InSpace(Address addr, AllocationSpace space); |  | 
|   982   bool InSpace(HeapObject* value, AllocationSpace space); |  | 
|   983  |  | 
|   984   // Checks whether the space is valid. |  | 
|   985   static bool IsValidAllocationSpace(AllocationSpace space); |  | 
|   986  |  | 
|   987   // Checks whether the given object is allowed to be migrated from its |   978   // Checks whether the given object is allowed to be migrated from its | 
|   988   // current space into the given destination space. Used for debugging. |   979   // current space into the given destination space. Used for debugging. | 
|   989   inline bool AllowedToBeMigrated(HeapObject* object, AllocationSpace dest); |   980   inline bool AllowedToBeMigrated(HeapObject* object, AllocationSpace dest); | 
|   990  |   981  | 
|   991   // Sets the stub_cache_ (only used when expanding the dictionary). |   982   // Sets the stub_cache_ (only used when expanding the dictionary). | 
|   992   void public_set_code_stubs(UnseededNumberDictionary* value) { |   983   void public_set_code_stubs(UnseededNumberDictionary* value) { | 
|   993     roots_[kCodeStubsRootIndex] = value; |   984     roots_[kCodeStubsRootIndex] = value; | 
|   994   } |   985   } | 
|   995  |   986  | 
|   996   // Sets the non_monomorphic_cache_ (only used when expanding the dictionary). |   987   // Sets the non_monomorphic_cache_ (only used when expanding the dictionary). | 
| (...skipping 13 matching lines...) |
|  1010     roots_[kMaterializedObjectsRootIndex] = objects; |  1001     roots_[kMaterializedObjectsRootIndex] = objects; | 
|  1011   } |  1002   } | 
|  1012  |  1003  | 
|  1013   // Generated code can embed this address to get access to the roots. |  1004   // Generated code can embed this address to get access to the roots. | 
|  1014   Object** roots_array_start() { return roots_; } |  1005   Object** roots_array_start() { return roots_; } | 
|  1015  |  1006  | 
|  1016   Address* store_buffer_top_address() { |  1007   Address* store_buffer_top_address() { | 
|  1017     return reinterpret_cast<Address*>(&roots_[kStoreBufferTopRootIndex]); |  1008     return reinterpret_cast<Address*>(&roots_[kStoreBufferTopRootIndex]); | 
|  1018   } |  1009   } | 
|  1019  |  1010  | 
|  1020   static bool RootIsImmortalImmovable(int root_index); |  | 
|  1021   void CheckHandleCount(); |  1011   void CheckHandleCount(); | 
|  1022  |  1012  | 
|  1023 #ifdef VERIFY_HEAP |  | 
|  1024   // Verify the heap is in its normal state before or after a GC. |  | 
|  1025   void Verify(); |  | 
|  1026 #endif |  | 
|  1027  |  | 
|  1028 #ifdef DEBUG |  | 
|  1029   void Print(); |  | 
|  1030   void PrintHandles(); |  | 
|  1031  |  | 
|  1032   // Report heap statistics. |  | 
|  1033   void ReportHeapStatistics(const char* title); |  | 
|  1034   void ReportCodeStatistics(const char* title); |  | 
|  1035 #endif |  | 
|  1036  |  | 
|  1037   // Zapping is needed for verify heap, and always done in debug builds. |  | 
|  1038   static inline bool ShouldZapGarbage() { |  | 
|  1039 #ifdef DEBUG |  | 
|  1040     return true; |  | 
|  1041 #else |  | 
|  1042 #ifdef VERIFY_HEAP |  | 
|  1043     return FLAG_verify_heap; |  | 
|  1044 #else |  | 
|  1045     return false; |  | 
|  1046 #endif |  | 
|  1047 #endif |  | 
|  1048   } |  | 
|  1049  |  | 
|  1050   // Number of "runtime allocations" done so far. |  1013   // Number of "runtime allocations" done so far. | 
|  1051   uint32_t allocations_count() { return allocations_count_; } |  1014   uint32_t allocations_count() { return allocations_count_; } | 
|  1052  |  1015  | 
|  1053   // Returns deterministic "time" value in ms. Works only with |  1016   // Returns deterministic "time" value in ms. Works only with | 
|  1054   // FLAG_verify_predictable. |  1017   // FLAG_verify_predictable. | 
|  1055   double synthetic_time() { return allocations_count_ / 2.0; } |  1018   double synthetic_time() { return allocations_count_ / 2.0; } | 
|  1056  |  1019  | 
|  1057   // Print short heap statistics. |  1020   // Print short heap statistics. | 
|  1058   void PrintShortHeapStatistics(); |  1021   void PrintShortHeapStatistics(); | 
|  1059  |  1022  | 
|  1060   size_t object_count_last_gc(size_t index) { |  1023   size_t object_count_last_gc(size_t index) { | 
|  1061     return index < OBJECT_STATS_COUNT ? object_counts_last_time_[index] : 0; |  1024     return index < OBJECT_STATS_COUNT ? object_counts_last_time_[index] : 0; | 
|  1062   } |  1025   } | 
 |  1026  | 
|  1063   size_t object_size_last_gc(size_t index) { |  1027   size_t object_size_last_gc(size_t index) { | 
|  1064     return index < OBJECT_STATS_COUNT ? object_sizes_last_time_[index] : 0; |  1028     return index < OBJECT_STATS_COUNT ? object_sizes_last_time_[index] : 0; | 
|  1065   } |  1029   } | 
|  1066  |  1030  | 
|  1067   // Write barrier support for address[offset] = o. |  1031   // Write barrier support for address[offset] = o. | 
|  1068   INLINE(void RecordWrite(Address address, int offset)); |  1032   INLINE(void RecordWrite(Address address, int offset)); | 
|  1069  |  1033  | 
|  1070   // Write barrier support for address[start : start + len[ = o. |  1034   // Write barrier support for address[start : start + len[ = o. | 
|  1071   INLINE(void RecordWrites(Address address, int start, int len)); |  1035   INLINE(void RecordWrites(Address address, int start, int len)); | 
|  1072  |  1036  | 
|  1073   enum HeapState { NOT_IN_GC, SCAVENGE, MARK_COMPACT }; |  | 
|  1074   inline HeapState gc_state() { return gc_state_; } |  1037   inline HeapState gc_state() { return gc_state_; } | 
|  1075  |  1038  | 
|  1076   inline bool IsInGCPostProcessing() { return gc_post_processing_depth_ > 0; } |  1039   inline bool IsInGCPostProcessing() { return gc_post_processing_depth_ > 0; } | 
|  1077  |  1040  | 
|  1078 #ifdef DEBUG |  | 
|  1079   void set_allocation_timeout(int timeout) { allocation_timeout_ = timeout; } |  | 
|  1080  |  | 
|  1081   void TracePathToObjectFrom(Object* target, Object* root); |  | 
|  1082   void TracePathToObject(Object* target); |  | 
|  1083   void TracePathToGlobal(); |  | 
|  1084 #endif |  | 
|  1085  |  | 
|  1086   // Callback function passed to Heap::Iterate etc.  Copies an object if |  | 
|  1087   // necessary, the object might be promoted to an old space.  The caller must |  | 
|  1088   // ensure the precondition that the object is (a) a heap object and (b) in |  | 
|  1089   // the heap's from space. |  | 
|  1090   static inline void ScavengePointer(HeapObject** p); |  | 
|  1091   static inline void ScavengeObject(HeapObject** p, HeapObject* object); |  | 
|  1092  |  | 
|  1093   // Slow part of scavenge object. |  | 
|  1094   static void ScavengeObjectSlow(HeapObject** p, HeapObject* object); |  | 
|  1095  |  | 
|  1096   enum ScratchpadSlotMode { IGNORE_SCRATCHPAD_SLOT, RECORD_SCRATCHPAD_SLOT }; |  | 
|  1097  |  | 
|  1098   // If an object has an AllocationMemento trailing it, return it, otherwise |  1041   // If an object has an AllocationMemento trailing it, return it, otherwise | 
|  1099   // return NULL; |  1042   // return NULL; | 
|  1100   inline AllocationMemento* FindAllocationMemento(HeapObject* object); |  1043   inline AllocationMemento* FindAllocationMemento(HeapObject* object); | 
|  1101  |  1044  | 
|  1102   // An object may have an AllocationSite associated with it through a trailing |  | 
|  1103   // AllocationMemento. Its feedback should be updated when objects are found |  | 
|  1104   // in the heap. |  | 
|  1105   static inline void UpdateAllocationSiteFeedback(HeapObject* object, |  | 
|  1106                                                   ScratchpadSlotMode mode); |  | 
|  1107  |  | 
|  1108   // Support for partial snapshots.  After calling this we have a linear |  | 
|  1109   // space to write objects in each space. |  | 
|  1110   struct Chunk { |  | 
|  1111     uint32_t size; |  | 
|  1112     Address start; |  | 
|  1113     Address end; |  | 
|  1114   }; |  | 
|  1115  |  | 
|  1116   typedef List<Chunk> Reservation; |  | 
|  1117  |  | 
|  1118   // Returns false if not able to reserve. |  1045   // Returns false if not able to reserve. | 
|  1119   bool ReserveSpace(Reservation* reservations); |  1046   bool ReserveSpace(Reservation* reservations); | 
|  1120  |  1047  | 
|  1121   // |  1048   // | 
|  1122   // Support for the API. |  1049   // Support for the API. | 
|  1123   // |  1050   // | 
|  1124  |  1051  | 
|  1125   void CreateApiObjects(); |  1052   void CreateApiObjects(); | 
|  1126  |  1053  | 
|  1127   inline intptr_t PromotedTotalSize() { |  | 
|  1128     int64_t total = PromotedSpaceSizeOfObjects() + PromotedExternalMemorySize(); |  | 
|  1129     if (total > std::numeric_limits<intptr_t>::max()) { |  | 
|  1130       // TODO(erikcorry): Use uintptr_t everywhere we do heap size calculations. |  | 
|  1131       return std::numeric_limits<intptr_t>::max(); |  | 
|  1132     } |  | 
|  1133     if (total < 0) return 0; |  | 
|  1134     return static_cast<intptr_t>(total); |  | 
|  1135   } |  | 
|  1136  |  | 
|  1137   inline intptr_t OldGenerationSpaceAvailable() { |  | 
|  1138     return old_generation_allocation_limit_ - PromotedTotalSize(); |  | 
|  1139   } |  | 
|  1140  |  | 
|  1141   inline intptr_t OldGenerationCapacityAvailable() { |  | 
|  1142     return max_old_generation_size_ - PromotedTotalSize(); |  | 
|  1143   } |  | 
|  1144  |  | 
|  1145   static const intptr_t kMinimumOldGenerationAllocationLimit = |  | 
|  1146       8 * (Page::kPageSize > MB ? Page::kPageSize : MB); |  | 
|  1147  |  | 
|  1148   static const int kInitalOldGenerationLimitFactor = 2; |  | 
|  1149  |  | 
|  1150 #if V8_OS_ANDROID |  | 
|  1151   // Don't apply pointer multiplier on Android since it has no swap space and |  | 
|  1152   // should instead adapt it's heap size based on available physical memory. |  | 
|  1153   static const int kPointerMultiplier = 1; |  | 
|  1154 #else |  | 
|  1155   static const int kPointerMultiplier = i::kPointerSize / 4; |  | 
|  1156 #endif |  | 
|  1157  |  | 
|  1158   // The new space size has to be a power of 2. Sizes are in MB. |  | 
|  1159   static const int kMaxSemiSpaceSizeLowMemoryDevice = 1 * kPointerMultiplier; |  | 
|  1160   static const int kMaxSemiSpaceSizeMediumMemoryDevice = 4 * kPointerMultiplier; |  | 
|  1161   static const int kMaxSemiSpaceSizeHighMemoryDevice = 8 * kPointerMultiplier; |  | 
|  1162   static const int kMaxSemiSpaceSizeHugeMemoryDevice = 8 * kPointerMultiplier; |  | 
|  1163  |  | 
|  1164   // The old space size has to be a multiple of Page::kPageSize. |  | 
|  1165   // Sizes are in MB. |  | 
|  1166   static const int kMaxOldSpaceSizeLowMemoryDevice = 128 * kPointerMultiplier; |  | 
|  1167   static const int kMaxOldSpaceSizeMediumMemoryDevice = |  | 
|  1168       256 * kPointerMultiplier; |  | 
|  1169   static const int kMaxOldSpaceSizeHighMemoryDevice = 512 * kPointerMultiplier; |  | 
|  1170   static const int kMaxOldSpaceSizeHugeMemoryDevice = 700 * kPointerMultiplier; |  | 
|  1171  |  | 
|  1172   // The executable size has to be a multiple of Page::kPageSize. |  | 
|  1173   // Sizes are in MB. |  | 
|  1174   static const int kMaxExecutableSizeLowMemoryDevice = 96 * kPointerMultiplier; |  | 
|  1175   static const int kMaxExecutableSizeMediumMemoryDevice = |  | 
|  1176       192 * kPointerMultiplier; |  | 
|  1177   static const int kMaxExecutableSizeHighMemoryDevice = |  | 
|  1178       256 * kPointerMultiplier; |  | 
|  1179   static const int kMaxExecutableSizeHugeMemoryDevice = |  | 
|  1180       256 * kPointerMultiplier; |  | 
|  1181  |  | 
|  1182   static const int kTraceRingBufferSize = 512; |  | 
|  1183   static const int kStacktraceBufferSize = 512; |  | 
|  1184  |  | 
|  1185   static const double kMinHeapGrowingFactor; |  | 
|  1186   static const double kMaxHeapGrowingFactor; |  | 
|  1187   static const double kMaxHeapGrowingFactorMemoryConstrained; |  | 
|  1188   static const double kMaxHeapGrowingFactorIdle; |  | 
|  1189   static const double kTargetMutatorUtilization; |  | 
|  1190  |  | 
|  1191   static double HeapGrowingFactor(double gc_speed, double mutator_speed); |  | 
|  1192  |  | 
|  1193   // Calculates the allocation limit based on a given growing factor and a |  1054   // Calculates the allocation limit based on a given growing factor and a | 
|  1194   // given old generation size. |  1055   // given old generation size. | 
|  1195   intptr_t CalculateOldGenerationAllocationLimit(double factor, |  1056   intptr_t CalculateOldGenerationAllocationLimit(double factor, | 
|  1196                                                  intptr_t old_gen_size); |  1057                                                  intptr_t old_gen_size); | 
|  1197  |  1058  | 
|  1198   // Sets the allocation limit to trigger the next full garbage collection. |  1059   // Sets the allocation limit to trigger the next full garbage collection. | 
|  1199   void SetOldGenerationAllocationLimit(intptr_t old_gen_size, double gc_speed, |  1060   void SetOldGenerationAllocationLimit(intptr_t old_gen_size, double gc_speed, | 
|  1200                                        double mutator_speed); |  1061                                        double mutator_speed); | 
|  1201  |  1062  | 
|  1202   // Decrease the allocation limit if the new limit based on the given |  1063   // Decrease the allocation limit if the new limit based on the given | 
|  1203   // parameters is lower than the current limit. |  1064   // parameters is lower than the current limit. | 
|  1204   void DampenOldGenerationAllocationLimit(intptr_t old_gen_size, |  1065   void DampenOldGenerationAllocationLimit(intptr_t old_gen_size, | 
|  1205                                           double gc_speed, |  1066                                           double gc_speed, | 
|  1206                                           double mutator_speed); |  1067                                           double mutator_speed); | 
|  1207  |  1068  | 
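A hedged sketch of how a limit could be derived from a growing factor (illustrative arithmetic only; the real policy also clamps against `max_old_generation_size_` and other inputs not shown here):

```cpp
// Illustrative only: scale the old-generation size by the growing factor,
// but never set the limit closer than the fixed minimum headroom.
static intptr_t AllocationLimitSketch(intptr_t old_gen_size, double factor) {
  intptr_t limit = static_cast<intptr_t>(old_gen_size * factor);
  return Max(limit, old_gen_size + Heap::kMinimumOldGenerationAllocationLimit);
}
```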
|  1208   // Indicates whether inline bump-pointer allocation has been disabled. |  | 
|  1209   bool inline_allocation_disabled() { return inline_allocation_disabled_; } |  | 
|  1210  |  | 
|  1211   // Switch whether inline bump-pointer allocation should be used. |  | 
|  1212   void EnableInlineAllocation(); |  | 
|  1213   void DisableInlineAllocation(); |  | 
|  1214  |  | 
|  1215   // Implements the corresponding V8 API function. |  1069   // Implements the corresponding V8 API function. | 
|  1216   bool IdleNotification(double deadline_in_seconds); |  1070   bool IdleNotification(double deadline_in_seconds); | 
|  1217   bool IdleNotification(int idle_time_in_ms); |  1071   bool IdleNotification(int idle_time_in_ms); | 
|  1218  |  1072  | 
|  1219   double MonotonicallyIncreasingTimeInMs(); |  1073   double MonotonicallyIncreasingTimeInMs(); | 
|  1220  |  1074  | 
|  1221   // Declare all the root indices.  This defines the root list order. |  | 
|  1222   enum RootListIndex { |  | 
|  1223 #define ROOT_INDEX_DECLARATION(type, name, camel_name) k##camel_name##RootIndex, |  | 
|  1224     STRONG_ROOT_LIST(ROOT_INDEX_DECLARATION) |  | 
|  1225 #undef ROOT_INDEX_DECLARATION |  | 
|  1226  |  | 
|  1227 #define STRING_INDEX_DECLARATION(name, str) k##name##RootIndex, |  | 
|  1228     INTERNALIZED_STRING_LIST(STRING_INDEX_DECLARATION) |  | 
|  1229 #undef STRING_DECLARATION |  | 
|  1230  |  | 
|  1231 #define SYMBOL_INDEX_DECLARATION(name) k##name##RootIndex, |  | 
|  1232     PRIVATE_SYMBOL_LIST(SYMBOL_INDEX_DECLARATION) |  | 
|  1233 #undef SYMBOL_INDEX_DECLARATION |  | 
|  1234  |  | 
|  1235 #define SYMBOL_INDEX_DECLARATION(name, varname, description) k##name##RootIndex, |  | 
|  1236     PUBLIC_SYMBOL_LIST(SYMBOL_INDEX_DECLARATION) |  | 
|  1237 #undef SYMBOL_INDEX_DECLARATION |  | 
|  1238  |  | 
|  1239 // Utility type maps |  | 
|  1240 #define DECLARE_STRUCT_MAP(NAME, Name, name) k##Name##MapRootIndex, |  | 
|  1241     STRUCT_LIST(DECLARE_STRUCT_MAP) |  | 
|  1242 #undef DECLARE_STRUCT_MAP |  | 
|  1243     kStringTableRootIndex, |  | 
|  1244  |  | 
|  1245 #define ROOT_INDEX_DECLARATION(type, name, camel_name) k##camel_name##RootIndex, |  | 
|  1246     SMI_ROOT_LIST(ROOT_INDEX_DECLARATION) |  | 
|  1247 #undef ROOT_INDEX_DECLARATION |  | 
|  1248     kRootListLength, |  | 
|  1249     kStrongRootListLength = kStringTableRootIndex, |  | 
|  1250     kSmiRootsStart = kStringTableRootIndex + 1 |  | 
|  1251   }; |  | 
|  1252  |  | 
|  1253   Object* root(RootListIndex index) { return roots_[index]; } |  1075   Object* root(RootListIndex index) { return roots_[index]; } | 
|  1254  |  1076  | 
|  1255   STATIC_ASSERT(kUndefinedValueRootIndex == |  | 
|  1256                 Internals::kUndefinedValueRootIndex); |  | 
|  1257   STATIC_ASSERT(kNullValueRootIndex == Internals::kNullValueRootIndex); |  | 
|  1258   STATIC_ASSERT(kTrueValueRootIndex == Internals::kTrueValueRootIndex); |  | 
|  1259   STATIC_ASSERT(kFalseValueRootIndex == Internals::kFalseValueRootIndex); |  | 
|  1260   STATIC_ASSERT(kempty_stringRootIndex == Internals::kEmptyStringRootIndex); |  | 
|  1261  |  | 
|  1262   // Generated code can embed direct references to non-writable roots if |  | 
|  1263   // they are in new space. |  | 
|  1264   static bool RootCanBeWrittenAfterInitialization(RootListIndex root_index); |  | 
|  1265   // Generated code can treat direct references to this root as constant. |  1077   // Generated code can treat direct references to this root as constant. | 
|  1266   bool RootCanBeTreatedAsConstant(RootListIndex root_index); |  1078   bool RootCanBeTreatedAsConstant(RootListIndex root_index); | 
|  1267  |  1079  | 
|  1268   Map* MapForFixedTypedArray(ExternalArrayType array_type); |  1080   Map* MapForFixedTypedArray(ExternalArrayType array_type); | 
|  1269   RootListIndex RootIndexForFixedTypedArray(ExternalArrayType array_type); |  1081   RootListIndex RootIndexForFixedTypedArray(ExternalArrayType array_type); | 
|  1270  |  1082  | 
|  1271   RootListIndex RootIndexForEmptyFixedTypedArray(ElementsKind kind); |  1083   RootListIndex RootIndexForEmptyFixedTypedArray(ElementsKind kind); | 
|  1272   FixedTypedArrayBase* EmptyFixedTypedArrayForMap(Map* map); |  1084   FixedTypedArrayBase* EmptyFixedTypedArrayForMap(Map* map); | 
|  1273  |  1085  | 
|  1274   void RecordStats(HeapStats* stats, bool take_snapshot = false); |  1086   void RecordStats(HeapStats* stats, bool take_snapshot = false); | 
|  1275  |  1087  | 
|  1276   // Copies a block of memory from src to dst. The block size should be |  | 
|  1277   // pointer-size aligned. |  | 
|  1278   static inline void CopyBlock(Address dst, Address src, int byte_size); |  | 
|  1279  |  | 
|  1280   // Optimized version of memmove for blocks with pointer size aligned sizes and |  | 
|  1281   // pointer size aligned addresses. |  | 
|  1282   static inline void MoveBlock(Address dst, Address src, int byte_size); |  | 
|  1283  |  | 
|  1284   // Check new space expansion criteria and expand semispaces if it was hit. |  1088   // Check new space expansion criteria and expand semispaces if it was hit. | 
|  1285   void CheckNewSpaceExpansionCriteria(); |  1089   void CheckNewSpaceExpansionCriteria(); | 
|  1286  |  1090  | 
|  1287   inline void IncrementPromotedObjectsSize(int object_size) { |  | 
|  1288     DCHECK(object_size > 0); |  | 
|  1289     promoted_objects_size_ += object_size; |  | 
|  1290   } |  | 
|  1291  |  | 
|  1292   inline void IncrementSemiSpaceCopiedObjectSize(int object_size) { |  | 
|  1293     DCHECK(object_size > 0); |  | 
|  1294     semi_space_copied_object_size_ += object_size; |  | 
|  1295   } |  | 
|  1296  |  | 
|  1297   inline intptr_t SurvivedNewSpaceObjectSize() { |  | 
|  1298     return promoted_objects_size_ + semi_space_copied_object_size_; |  | 
|  1299   } |  | 
|  1300  |  | 
|  1301   inline void IncrementNodesDiedInNewSpace() { nodes_died_in_new_space_++; } |  | 
|  1302  |  | 
|  1303   inline void IncrementNodesCopiedInNewSpace() { nodes_copied_in_new_space_++; } |  | 
|  1304  |  | 
|  1305   inline void IncrementNodesPromoted() { nodes_promoted_++; } |  | 
|  1306  |  | 
|  1307   inline void IncrementYoungSurvivorsCounter(int survived) { |  | 
|  1308     DCHECK(survived >= 0); |  | 
|  1309     survived_last_scavenge_ = survived; |  | 
|  1310     survived_since_last_expansion_ += survived; |  | 
|  1311   } |  | 
|  1312  |  | 
|  1313   inline bool HeapIsFullEnoughToStartIncrementalMarking(intptr_t limit) { |  1091   inline bool HeapIsFullEnoughToStartIncrementalMarking(intptr_t limit) { | 
|  1314     if (FLAG_stress_compaction && (gc_count_ & 1) != 0) return true; |  1092     if (FLAG_stress_compaction && (gc_count_ & 1) != 0) return true; | 
|  1315  |  1093  | 
|  1316     intptr_t adjusted_allocation_limit = limit - new_space_.Capacity(); |  1094     intptr_t adjusted_allocation_limit = limit - new_space_.Capacity(); | 
|  1317  |  1095  | 
|  1318     if (PromotedTotalSize() >= adjusted_allocation_limit) return true; |  1096     if (PromotedTotalSize() >= adjusted_allocation_limit) return true; | 
|  1319  |  1097  | 
|  1320     return false; |  1098     return false; | 
|  1321   } |  1099   } | 
|  1322  |  1100  | 
|  1323   void UpdateNewSpaceReferencesInExternalStringTable( |  1101   void UpdateNewSpaceReferencesInExternalStringTable( | 
|  1324       ExternalStringTableUpdaterCallback updater_func); |  1102       ExternalStringTableUpdaterCallback updater_func); | 
|  1325  |  1103  | 
|  1326   void UpdateReferencesInExternalStringTable( |  1104   void UpdateReferencesInExternalStringTable( | 
|  1327       ExternalStringTableUpdaterCallback updater_func); |  1105       ExternalStringTableUpdaterCallback updater_func); | 
|  1328  |  1106  | 
|  1329   void ProcessAllWeakReferences(WeakObjectRetainer* retainer); |  1107   void ProcessAllWeakReferences(WeakObjectRetainer* retainer); | 
|  1330   void ProcessYoungWeakReferences(WeakObjectRetainer* retainer); |  1108   void ProcessYoungWeakReferences(WeakObjectRetainer* retainer); | 
|  1331  |  1109  | 
|  1332   void VisitExternalResources(v8::ExternalResourceVisitor* visitor); |  1110   void VisitExternalResources(v8::ExternalResourceVisitor* visitor); | 
|  1333  |  1111  | 
|  1334   // An object should be promoted if the object has survived a |  1112   // An object should be promoted if the object has survived a | 
|  1335   // scavenge operation. |  1113   // scavenge operation. | 
|  1336   inline bool ShouldBePromoted(Address old_address, int object_size); |  1114   inline bool ShouldBePromoted(Address old_address, int object_size); | 
|  1337  |  1115  | 
|  1338   void ClearNormalizedMapCaches(); |  1116   void ClearNormalizedMapCaches(); | 
|  1339  |  1117  | 
|  1340   GCTracer* tracer() { return tracer_; } |  | 
|  1341  |  | 
|  1342   // Returns the size of objects residing in non new spaces. |  | 
|  1343   intptr_t PromotedSpaceSizeOfObjects(); |  | 
|  1344  |  | 
|  1345   double total_regexp_code_generated() { return total_regexp_code_generated_; } |  | 
|  1346   void IncreaseTotalRegexpCodeGenerated(int size) { |  | 
|  1347     total_regexp_code_generated_ += size; |  | 
|  1348   } |  | 
|  1349  |  | 
|  1350   void IncrementCodeGeneratedBytes(bool is_crankshafted, int size) { |  | 
|  1351     if (is_crankshafted) { |  | 
|  1352       crankshaft_codegen_bytes_generated_ += size; |  | 
|  1353     } else { |  | 
|  1354       full_codegen_bytes_generated_ += size; |  | 
|  1355     } |  | 
|  1356   } |  | 
|  1357  |  | 
|  1358   void UpdateNewSpaceAllocationCounter() { |  | 
|  1359     new_space_allocation_counter_ = NewSpaceAllocationCounter(); |  | 
|  1360   } |  | 
|  1361  |  | 
|  1362   size_t NewSpaceAllocationCounter() { |  | 
|  1363     return new_space_allocation_counter_ + new_space()->AllocatedSinceLastGC(); |  | 
|  1364   } |  | 
|  1365  |  | 
|  1366   // This should be used only for testing. |  | 
|  1367   void set_new_space_allocation_counter(size_t new_value) { |  | 
|  1368     new_space_allocation_counter_ = new_value; |  | 
|  1369   } |  | 
|  1370  |  | 
|  1371   void UpdateOldGenerationAllocationCounter() { |  | 
|  1372     old_generation_allocation_counter_ = OldGenerationAllocationCounter(); |  | 
|  1373   } |  | 
|  1374  |  | 
|  1375   size_t OldGenerationAllocationCounter() { |  | 
|  1376     return old_generation_allocation_counter_ + PromotedSinceLastGC(); |  | 
|  1377   } |  | 
|  1378  |  | 
|  1379   // This should be used only for testing. |  | 
|  1380   void set_old_generation_allocation_counter(size_t new_value) { |  | 
|  1381     old_generation_allocation_counter_ = new_value; |  | 
|  1382   } |  | 
|  1383  |  | 
|  1384   size_t PromotedSinceLastGC() { |  | 
|  1385     return PromotedSpaceSizeOfObjects() - old_generation_size_at_last_gc_; |  | 
|  1386   } |  | 
|  1387  |  | 
|  1388   // Update GC statistics that are tracked on the Heap. |  | 
|  1389   void UpdateCumulativeGCStatistics(double duration, double spent_in_mutator, |  | 
|  1390                                     double marking_time); |  | 
|  1391  |  | 
|  1392   // Returns maximum GC pause. |  | 
|  1393   double get_max_gc_pause() { return max_gc_pause_; } |  | 
|  1394  |  | 
|  1395   // Returns maximum size of objects alive after GC. |  | 
|  1396   intptr_t get_max_alive_after_gc() { return max_alive_after_gc_; } |  | 
|  1397  |  | 
|  1398   // Returns minimal interval between two subsequent collections. |  | 
|  1399   double get_min_in_mutator() { return min_in_mutator_; } |  | 
|  1400  |  | 
|  1401   void IncrementDeferredCount(v8::Isolate::UseCounterFeature feature); |  1118   void IncrementDeferredCount(v8::Isolate::UseCounterFeature feature); | 
|  1402  |  1119  | 
|  1403   MarkCompactCollector* mark_compact_collector() { |  | 
|  1404     return &mark_compact_collector_; |  | 
|  1405   } |  | 
|  1406  |  | 
|  1407   StoreBuffer* store_buffer() { return &store_buffer_; } |  | 
|  1408  |  | 
|  1409   IncrementalMarking* incremental_marking() { return &incremental_marking_; } |  | 
|  1410  |  | 
|  1411   ExternalStringTable* external_string_table() { |  1120   ExternalStringTable* external_string_table() { | 
|  1412     return &external_string_table_; |  1121     return &external_string_table_; | 
|  1413   } |  1122   } | 
|  1414  |  1123  | 
|  1415   bool concurrent_sweeping_enabled() { return concurrent_sweeping_enabled_; } |  1124   bool concurrent_sweeping_enabled() { return concurrent_sweeping_enabled_; } | 
|  1416  |  1125  | 
|  1417   inline Isolate* isolate(); |  | 
|  1418  |  | 
|  1419   void CallGCPrologueCallbacks(GCType gc_type, GCCallbackFlags flags); |  | 
|  1420   void CallGCEpilogueCallbacks(GCType gc_type, GCCallbackFlags flags); |  | 
|  1421  |  | 
|  1422   inline bool OldGenerationAllocationLimitReached(); |  1126   inline bool OldGenerationAllocationLimitReached(); | 
|  1423  |  1127  | 
|  1424   void QueueMemoryChunkForFree(MemoryChunk* chunk); |  1128   void QueueMemoryChunkForFree(MemoryChunk* chunk); | 
|  1425   void FilterStoreBufferEntriesOnAboutToBeFreedPages(); |  1129   void FilterStoreBufferEntriesOnAboutToBeFreedPages(); | 
|  1426   void FreeQueuedChunks(); |  1130   void FreeQueuedChunks(); | 
|  1427  |  1131  | 
|  1428   int gc_count() const { return gc_count_; } |  | 
|  1429  |  | 
|  1430   bool RecentIdleNotificationHappened(); |  1132   bool RecentIdleNotificationHappened(); | 
|  1431  |  1133  | 
|  1432   // Completely clear the Instanceof cache (to stop it keeping objects alive |  1134   // Completely clear the Instanceof cache (to stop it keeping objects alive | 
|  1433   // around a GC). |  1135   // around a GC). | 
|  1434   inline void CompletelyClearInstanceofCache(); |  1136   inline void CompletelyClearInstanceofCache(); | 
|  1435  |  1137  | 
|  1436   // The roots that have an index less than this are always in old space. |  | 
|  1437   static const int kOldSpaceRoots = 0x20; |  | 
|  1438  |  | 
|  1439   inline uint32_t HashSeed(); |  1138   inline uint32_t HashSeed(); | 
|  1440  |  1139  | 
|  1441   inline Smi* NextScriptId(); |  1140   inline Smi* NextScriptId(); | 
|  1442  |  1141  | 
|  1443   inline void SetArgumentsAdaptorDeoptPCOffset(int pc_offset); |  1142   inline void SetArgumentsAdaptorDeoptPCOffset(int pc_offset); | 
|  1444   inline void SetConstructStubDeoptPCOffset(int pc_offset); |  1143   inline void SetConstructStubDeoptPCOffset(int pc_offset); | 
|  1445   inline void SetGetterStubDeoptPCOffset(int pc_offset); |  1144   inline void SetGetterStubDeoptPCOffset(int pc_offset); | 
|  1446   inline void SetSetterStubDeoptPCOffset(int pc_offset); |  1145   inline void SetSetterStubDeoptPCOffset(int pc_offset); | 
|  1447  |  1146  | 
|  1448   // For post mortem debugging. |  1147   // For post mortem debugging. | 
| (...skipping 12 matching lines...) | 
|  1461   } |  1160   } | 
|  1462  |  1161  | 
|  1463   void DeoptMarkedAllocationSites(); |  1162   void DeoptMarkedAllocationSites(); | 
|  1464  |  1163  | 
|  1465   bool MaximumSizeScavenge() { return maximum_size_scavenges_ > 0; } |  1164   bool MaximumSizeScavenge() { return maximum_size_scavenges_ > 0; } | 
|  1466  |  1165  | 
|  1467   bool DeoptMaybeTenuredAllocationSites() { |  1166   bool DeoptMaybeTenuredAllocationSites() { | 
|  1468     return new_space_.IsAtMaximumCapacity() && maximum_size_scavenges_ == 0; |  1167     return new_space_.IsAtMaximumCapacity() && maximum_size_scavenges_ == 0; | 
|  1469   } |  1168   } | 
|  1470  |  1169  | 
|  1471   // ObjectStats are kept in two arrays, counts and sizes. Related stats are |  | 
|  1472   // stored in a contiguous linear buffer. Stats groups are stored one after |  | 
|  1473   // another. |  | 
|  1474   enum { |  | 
|  1475     FIRST_CODE_KIND_SUB_TYPE = LAST_TYPE + 1, |  | 
|  1476     FIRST_FIXED_ARRAY_SUB_TYPE = |  | 
|  1477         FIRST_CODE_KIND_SUB_TYPE + Code::NUMBER_OF_KINDS, |  | 
|  1478     FIRST_CODE_AGE_SUB_TYPE = |  | 
|  1479         FIRST_FIXED_ARRAY_SUB_TYPE + LAST_FIXED_ARRAY_SUB_TYPE + 1, |  | 
|  1480     OBJECT_STATS_COUNT = FIRST_CODE_AGE_SUB_TYPE + Code::kCodeAgeCount + 1 |  | 
|  1481   }; |  | 
|  1482  |  | 
|  1483   void RecordObjectStats(InstanceType type, size_t size) { |  1170   void RecordObjectStats(InstanceType type, size_t size) { | 
|  1484     DCHECK(type <= LAST_TYPE); |  1171     DCHECK(type <= LAST_TYPE); | 
|  1485     object_counts_[type]++; |  1172     object_counts_[type]++; | 
|  1486     object_sizes_[type] += size; |  1173     object_sizes_[type] += size; | 
|  1487   } |  1174   } | 
|  1488  |  1175  | 
|  1489   void RecordCodeSubTypeStats(int code_sub_type, int code_age, size_t size) { |  1176   void RecordCodeSubTypeStats(int code_sub_type, int code_age, size_t size) { | 
|  1490     int code_sub_type_index = FIRST_CODE_KIND_SUB_TYPE + code_sub_type; |  1177     int code_sub_type_index = FIRST_CODE_KIND_SUB_TYPE + code_sub_type; | 
|  1491     int code_age_index = |  1178     int code_age_index = | 
|  1492         FIRST_CODE_AGE_SUB_TYPE + code_age - Code::kFirstCodeAge; |  1179         FIRST_CODE_AGE_SUB_TYPE + code_age - Code::kFirstCodeAge; | 
| (...skipping 15 matching lines...) | 
|  1508  |  1195  | 
|  1509   void TraceObjectStats(); |  1196   void TraceObjectStats(); | 
|  1510   void TraceObjectStat(const char* name, int count, int size, double time); |  1197   void TraceObjectStat(const char* name, int count, int size, double time); | 
|  1511   void CheckpointObjectStats(); |  1198   void CheckpointObjectStats(); | 
|  1512   bool GetObjectTypeName(size_t index, const char** object_type, |  1199   bool GetObjectTypeName(size_t index, const char** object_type, | 
|  1513                          const char** object_sub_type); |  1200                          const char** object_sub_type); | 
|  1514  |  1201  | 
|  1515   void RegisterStrongRoots(Object** start, Object** end); |  1202   void RegisterStrongRoots(Object** start, Object** end); | 
|  1516   void UnregisterStrongRoots(Object** start); |  1203   void UnregisterStrongRoots(Object** start); | 
|  1517  |  1204  | 
|  1518   // Taking this lock prevents the GC from entering a phase that relocates |  | 
|  1519   // object references. |  | 
|  1520   class RelocationLock { |  | 
|  1521    public: |  | 
|  1522     explicit RelocationLock(Heap* heap) : heap_(heap) { |  | 
|  1523       heap_->relocation_mutex_.Lock(); |  | 
|  1524     } |  | 
|  1525  |  | 
|  1526     ~RelocationLock() { heap_->relocation_mutex_.Unlock(); } |  | 
|  1527  |  | 
|  1528    private: |  | 
|  1529     Heap* heap_; |  | 
|  1530   }; |  | 
|  1531  |  | 
|  1532   // An optional version of the above lock that can be used for some critical |  | 
|  1533   // sections on the mutator thread; only safe since the GC currently does not |  | 
|  1534   // do concurrent compaction. |  | 
|  1535   class OptionalRelocationLock { |  | 
|  1536    public: |  | 
|  1537     OptionalRelocationLock(Heap* heap, bool concurrent) |  | 
|  1538         : heap_(heap), concurrent_(concurrent) { |  | 
|  1539       if (concurrent_) heap_->relocation_mutex_.Lock(); |  | 
|  1540     } |  | 
|  1541  |  | 
|  1542     ~OptionalRelocationLock() { |  | 
|  1543       if (concurrent_) heap_->relocation_mutex_.Unlock(); |  | 
|  1544     } |  | 
|  1545  |  | 
|  1546    private: |  | 
|  1547     Heap* heap_; |  | 
|  1548     bool concurrent_; |  | 
|  1549   }; |  | 
|  1550  |  | 
|  1551   void AddWeakObjectToCodeDependency(Handle<HeapObject> obj, |  1205   void AddWeakObjectToCodeDependency(Handle<HeapObject> obj, | 
|  1552                                      Handle<DependentCode> dep); |  1206                                      Handle<DependentCode> dep); | 
|  1553  |  1207  | 
|  1554   DependentCode* LookupWeakObjectToCodeDependency(Handle<HeapObject> obj); |  1208   DependentCode* LookupWeakObjectToCodeDependency(Handle<HeapObject> obj); | 
|  1555  |  1209  | 
|  1556   void AddRetainedMap(Handle<Map> map); |  1210   void AddRetainedMap(Handle<Map> map); | 
|  1557  |  1211  | 
|  1558   static void FatalProcessOutOfMemory(const char* location, |  | 
|  1559                                       bool take_snapshot = false); |  | 
|  1560  |  | 
|  1561   // This event is triggered after successful allocation of a new object made |  1212   // This event is triggered after successful allocation of a new object made | 
|  1562   // by runtime. Allocations of target space for object evacuation do not |  1213   // by runtime. Allocations of target space for object evacuation do not | 
|  1563   // trigger the event. In order to track ALL allocations one must turn off |  1214   // trigger the event. In order to track ALL allocations one must turn off | 
|  1564   // FLAG_inline_new and FLAG_use_allocation_folding. |  1215   // FLAG_inline_new and FLAG_use_allocation_folding. | 
|  1565   inline void OnAllocationEvent(HeapObject* object, int size_in_bytes); |  1216   inline void OnAllocationEvent(HeapObject* object, int size_in_bytes); | 
|  1566  |  1217  | 
|  1567   // This event is triggered after object is moved to a new place. |  1218   // This event is triggered after object is moved to a new place. | 
|  1568   inline void OnMoveEvent(HeapObject* target, HeapObject* source, |  1219   inline void OnMoveEvent(HeapObject* target, HeapObject* source, | 
|  1569                           int size_in_bytes); |  1220                           int size_in_bytes); | 
|  1570  |  1221  | 
| (...skipping 21 matching lines...) | 
|  1592  |  1243  | 
|  1593   // An ArrayBuffer moved from new space to old space. |  1244   // An ArrayBuffer moved from new space to old space. | 
|  1594   void PromoteArrayBuffer(Object* buffer); |  1245   void PromoteArrayBuffer(Object* buffer); | 
|  1595  |  1246  | 
|  1596   bool HasLowAllocationRate(); |  1247   bool HasLowAllocationRate(); | 
|  1597   bool HasHighFragmentation(); |  1248   bool HasHighFragmentation(); | 
|  1598   bool HasHighFragmentation(intptr_t used, intptr_t committed); |  1249   bool HasHighFragmentation(intptr_t used, intptr_t committed); | 
|  1599  |  1250  | 
|  1600   bool ShouldOptimizeForMemoryUsage() { return optimize_for_memory_usage_; } |  1251   bool ShouldOptimizeForMemoryUsage() { return optimize_for_memory_usage_; } | 
|  1601  |  1252  | 
 |  1253   // =========================================================================== | 
 |  1254   // Initialization. =========================================================== | 
 |  1255   // =========================================================================== | 
 |  1256  | 
 |  1257   // Configure heap size in MB before setup. Return false if the heap has been | 
 |  1258   // set up already. | 
 |  1259   bool ConfigureHeap(int max_semi_space_size, int max_old_space_size, | 
 |  1260                      int max_executable_size, size_t code_range_size); | 
 |  1261   bool ConfigureHeapDefault(); | 
 |  1262  | 
 |  1263   // Prepares the heap, setting up memory areas that are needed in the isolate | 
 |  1264   // without actually creating any objects. | 
 |  1265   bool SetUp(); | 
 |  1266  | 
 |  1267   // Bootstraps the object heap with the core set of objects required to run. | 
 |  1268   // Returns whether it succeeded. | 
 |  1269   bool CreateHeapObjects(); | 
 |  1270  | 
 |  1271   // Destroys all memory allocated by the heap. | 
 |  1272   void TearDown(); | 
 |  1273  | 
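The declarations above imply a fixed lifecycle: configure sizes first, then SetUp(), then CreateHeapObjects(), with TearDown() on shutdown. A minimal sketch of that order follows; the helper name and the numeric sizes are illustrative assumptions, not part of this header.

  // Sketch only: drives a Heap through the lifecycle declared above.
  // The sizes are placeholders, given in MB per the ConfigureHeap comment.
  bool InitializeHeap(Heap* heap) {
    // ConfigureHeap() fails if the heap has already been set up.
    if (!heap->ConfigureHeap(8, 700, 256, 0)) return false;
    if (!heap->SetUp()) return false;              // reserve memory areas
    if (!heap->CreateHeapObjects()) return false;  // bootstrap roots and maps
    return true;
  }
  // On shutdown, heap->TearDown() releases all memory allocated by the heap.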
 |  1274   // =========================================================================== | 
 |  1275   // Getters for spaces. ======================================================= | 
 |  1276   // =========================================================================== | 
 |  1277  | 
 |  1278   // Return the starting address and a mask for the new space.  And-masking an | 
 |  1279   // address with the mask will result in the start address of the new space | 
 |  1280   // for all addresses in either semispace. | 
 |  1281   Address NewSpaceStart() { return new_space_.start(); } | 
 |  1282   uintptr_t NewSpaceMask() { return new_space_.mask(); } | 
 |  1283   Address NewSpaceTop() { return new_space_.top(); } | 
 |  1284  | 
 |  1285   NewSpace* new_space() { return &new_space_; } | 
 |  1286   OldSpace* old_space() { return old_space_; } | 
 |  1287   OldSpace* code_space() { return code_space_; } | 
 |  1288   MapSpace* map_space() { return map_space_; } | 
 |  1289   LargeObjectSpace* lo_space() { return lo_space_; } | 
 |  1290  | 
 |  1291   PagedSpace* paged_space(int idx) { | 
 |  1292     switch (idx) { | 
 |  1293       case OLD_SPACE: | 
 |  1294         return old_space(); | 
 |  1295       case MAP_SPACE: | 
 |  1296         return map_space(); | 
 |  1297       case CODE_SPACE: | 
 |  1298         return code_space(); | 
 |  1299       case NEW_SPACE: | 
 |  1300       case LO_SPACE: | 
 |  1301         UNREACHABLE(); | 
 |  1302     } | 
 |  1303     return NULL; | 
 |  1304   } | 
 |  1305  | 
 |  1306   Space* space(int idx) { | 
 |  1307     switch (idx) { | 
 |  1308       case NEW_SPACE: | 
 |  1309         return new_space(); | 
 |  1310       case LO_SPACE: | 
 |  1311         return lo_space(); | 
 |  1312       default: | 
 |  1313         return paged_space(idx); | 
 |  1314     } | 
 |  1315   } | 
 |  1316  | 
 |  1317   // Returns name of the space. | 
 |  1318   const char* GetSpaceName(int idx); | 
 |  1319  | 
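As a rough usage sketch, the indexed accessors above make it possible to walk all spaces generically. FIRST_SPACE/LAST_SPACE and PrintF are assumed from elsewhere in V8; they are not declared in this excerpt.

  // Sketch: print the name of every space, assuming the AllocationSpace enum
  // forms a contiguous range bounded by FIRST_SPACE and LAST_SPACE.
  void ListSpaces(Heap* heap) {
    for (int idx = FIRST_SPACE; idx <= LAST_SPACE; idx++) {
      PrintF("space %d: %s\n", idx, heap->GetSpaceName(idx));
    }
  }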
 |  1320   // =========================================================================== | 
 |  1321   // Getters to other components. ============================================== | 
 |  1322   // =========================================================================== | 
 |  1323  | 
 |  1324   GCTracer* tracer() { return tracer_; } | 
 |  1325  | 
 |  1326   PromotionQueue* promotion_queue() { return &promotion_queue_; } | 
 |  1327  | 
 |  1328   inline Isolate* isolate(); | 
 |  1329  | 
 |  1330   MarkCompactCollector* mark_compact_collector() { | 
 |  1331     return &mark_compact_collector_; | 
 |  1332   } | 
 |  1333  | 
 |  1334   StoreBuffer* store_buffer() { return &store_buffer_; } | 
 |  1335  | 
 |  1336   // =========================================================================== | 
 |  1337   // Inline allocation. ======================================================== | 
 |  1338   // =========================================================================== | 
 |  1339  | 
 |  1340   // Indicates whether inline bump-pointer allocation has been disabled. | 
 |  1341   bool inline_allocation_disabled() { return inline_allocation_disabled_; } | 
 |  1342  | 
 |  1343   // Switch whether inline bump-pointer allocation should be used. | 
 |  1344   void EnableInlineAllocation(); | 
 |  1345   void DisableInlineAllocation(); | 
 |  1346  | 
 |  1347   // =========================================================================== | 
 |  1348   // Methods triggering GCs. =================================================== | 
 |  1349   // =========================================================================== | 
 |  1350  | 
 |  1351   // Performs garbage collection operation. | 
 |  1352   // Returns whether there is a chance that another major GC could | 
 |  1353   // collect more garbage. | 
 |  1354   inline bool CollectGarbage( | 
 |  1355       AllocationSpace space, const char* gc_reason = NULL, | 
 |  1356       const GCCallbackFlags gc_callback_flags = kNoGCCallbackFlags); | 
 |  1357  | 
 |  1358   // Performs a full garbage collection.  If (flags & kMakeHeapIterableMask) is | 
 |  1359   // non-zero, then the slower precise sweeper is used, which leaves the heap | 
 |  1360   // in a state where we can iterate over the heap visiting all objects. | 
 |  1361   void CollectAllGarbage( | 
 |  1362       int flags = kFinalizeIncrementalMarkingMask, const char* gc_reason = NULL, | 
 |  1363       const GCCallbackFlags gc_callback_flags = kNoGCCallbackFlags); | 
 |  1364  | 
 |  1365   // Last-hope GC; tries to reclaim as much memory as possible. | 
 |  1366   void CollectAllAvailableGarbage(const char* gc_reason = NULL); | 
 |  1367  | 
 |  1368   // Invoked when GC was requested via the stack guard. | 
 |  1369   void HandleGCRequest(); | 
 |  1370  | 
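A short usage sketch of the collection entry points above; the helper names and gc_reason strings are illustrative.

  // Sketch: forcing collections through the public entry points above.
  void ForceScavenge(Heap* heap) {
    heap->CollectGarbage(NEW_SPACE, "testing: force scavenge");
  }

  void ForceFullGC(Heap* heap) {
    heap->CollectAllGarbage(Heap::kFinalizeIncrementalMarkingMask,
                            "testing: force full GC");
  }

  void SqueezeMemory(Heap* heap) {
    // Repeats major GCs until little or nothing more can be reclaimed.
    heap->CollectAllAvailableGarbage("testing: low memory notification");
  }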
 |  1371   // =========================================================================== | 
 |  1372   // Iterators. ================================================================ | 
 |  1373   // =========================================================================== | 
 |  1374  | 
 |  1375   // Iterates over all roots in the heap. | 
 |  1376   void IterateRoots(ObjectVisitor* v, VisitMode mode); | 
 |  1377   // Iterates over all strong roots in the heap. | 
 |  1378   void IterateStrongRoots(ObjectVisitor* v, VisitMode mode); | 
 |  1379   // Iterates over entries in the smi roots list.  Only interesting to the | 
 |  1380   // serializer/deserializer, since GC does not care about smis. | 
 |  1381   void IterateSmiRoots(ObjectVisitor* v); | 
 |  1382   // Iterates over all the other roots in the heap. | 
 |  1383   void IterateWeakRoots(ObjectVisitor* v, VisitMode mode); | 
 |  1384  | 
 |  1385   // Iterates over pointers to the from-semispace of new space found in the | 
 |  1386   // memory interval from start to end within |object|. | 
 |  1387   void IterateAndMarkPointersToFromSpace(HeapObject* object, Address start, | 
 |  1388                                          Address end, bool record_slots, | 
 |  1389                                          ObjectSlotCallback callback); | 
 |  1390  | 
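Root iteration is consumed through an ObjectVisitor. The sketch below counts root slots that point into new space; the VisitPointers signature and the VISIT_ALL mode are assumptions taken from the wider V8 code base rather than from this excerpt.

  // Sketch: count strong root slots that currently point into new space.
  class NewSpaceRootCounter : public ObjectVisitor {
   public:
    explicit NewSpaceRootCounter(Heap* heap) : heap_(heap), count_(0) {}
    void VisitPointers(Object** start, Object** end) override {
      for (Object** p = start; p < end; p++) {
        if (heap_->InNewSpace(*p)) count_++;
      }
    }
    int count() const { return count_; }

   private:
    Heap* heap_;
    int count_;
  };
  // Usage: NewSpaceRootCounter counter(heap);
  //        heap->IterateStrongRoots(&counter, VISIT_ALL);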
 |  1391   // =========================================================================== | 
 |  1392   // Incremental marking API. ================================================== | 
 |  1393   // =========================================================================== | 
 |  1394  | 
 |  1395   // Start incremental marking and ensure that idle time handler can perform | 
 |  1396   // incremental steps. | 
 |  1397   void StartIdleIncrementalMarking(); | 
 |  1398  | 
 |  1399   // Starts incremental marking assuming incremental marking is currently | 
 |  1400   // stopped. | 
 |  1401   void StartIncrementalMarking(int gc_flags, | 
 |  1402                                const GCCallbackFlags gc_callback_flags, | 
 |  1403                                const char* reason = nullptr); | 
 |  1404  | 
 |  1405   // Performs incremental marking steps of step_size_in_bytes as long as | 
 |  1406   // deadline_in_ms is not reached. step_size_in_bytes can be 0 to compute | 
 |  1407   // an estimated increment. Returns the remaining time that cannot be used | 
 |  1408   // for incremental marking anymore because a single step would exceed the | 
 |  1409   // deadline. | 
 |  1410   double AdvanceIncrementalMarking( | 
 |  1411       intptr_t step_size_in_bytes, double deadline_in_ms, | 
 |  1412       IncrementalMarking::StepActions step_actions); | 
 |  1413  | 
 |  1414   IncrementalMarking* incremental_marking() { return &incremental_marking_; } | 
 |  1415  | 
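The sketch below drives marking from an idle-time budget using only the methods declared above; IncrementalMarking::IsStopped() and IdleStepActions() are assumed from the IncrementalMarking interface, not from this excerpt.

  // Sketch: advance incremental marking within an idle-time deadline.
  void DoIdleMarking(Heap* heap, double deadline_in_ms) {
    if (heap->incremental_marking()->IsStopped()) {
      heap->StartIdleIncrementalMarking();
    }
    // A step size of 0 asks the marker to compute an estimated increment.
    heap->AdvanceIncrementalMarking(0, deadline_in_ms,
                                    IncrementalMarking::IdleStepActions());
  }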
 |  1416   // =========================================================================== | 
 |  1417   // Methods checking/returning the space of a given object/address. =========== | 
 |  1418   // =========================================================================== | 
 |  1419  | 
 |  1420   // Returns whether the object resides in new space. | 
 |  1421   inline bool InNewSpace(Object* object); | 
 |  1422   inline bool InNewSpace(Address address); | 
 |  1423   inline bool InNewSpacePage(Address address); | 
 |  1424   inline bool InFromSpace(Object* object); | 
 |  1425   inline bool InToSpace(Object* object); | 
 |  1426  | 
 |  1427   // Returns whether the object resides in old space. | 
 |  1428   inline bool InOldSpace(Address address); | 
 |  1429   inline bool InOldSpace(Object* object); | 
 |  1430  | 
 |  1431   // Checks whether an address/object is in the heap (including the | 
 |  1432   // auxiliary area and unused area). | 
 |  1433   bool Contains(Address addr); | 
 |  1434   bool Contains(HeapObject* value); | 
 |  1435  | 
 |  1436   // Checks whether an address/object is in a space. | 
 |  1437   // Currently used by tests, serialization and heap verification only. | 
 |  1438   bool InSpace(Address addr, AllocationSpace space); | 
 |  1439   bool InSpace(HeapObject* value, AllocationSpace space); | 
 |  1440  | 
 |  1441   // =========================================================================== | 
 |  1442   // GC statistics. ============================================================ | 
 |  1443   // =========================================================================== | 
 |  1444  | 
 |  1445   // Returns the maximum amount of memory reserved for the heap.  For | 
 |  1446   // the young generation, we reserve 4 times the amount needed for a | 
 |  1447   // semi space.  The young generation consists of two semi spaces and | 
 |  1448   // we reserve twice the amount needed for those in order to ensure | 
 |  1449   // that new space can be aligned to its size. | 
 |  1450   intptr_t MaxReserved() { | 
 |  1451     return 4 * reserved_semispace_size_ + max_old_generation_size_; | 
 |  1452   } | 
 |  1453   int MaxSemiSpaceSize() { return max_semi_space_size_; } | 
 |  1454   int ReservedSemiSpaceSize() { return reserved_semispace_size_; } | 
 |  1455   int InitialSemiSpaceSize() { return initial_semispace_size_; } | 
 |  1456   int TargetSemiSpaceSize() { return target_semispace_size_; } | 
 |  1457   intptr_t MaxOldGenerationSize() { return max_old_generation_size_; } | 
 |  1458   intptr_t MaxExecutableSize() { return max_executable_size_; } | 
 |  1459  | 
 |  1460   // Returns the capacity of the heap in bytes w/o growing. Heap grows when | 
 |  1461   // more spaces are needed until it reaches the limit. | 
 |  1462   intptr_t Capacity(); | 
 |  1463  | 
 |  1464   // Returns the amount of memory currently committed for the heap. | 
 |  1465   intptr_t CommittedMemory(); | 
 |  1466  | 
 |  1467   // Returns the amount of memory currently committed for the old space. | 
 |  1468   intptr_t CommittedOldGenerationMemory(); | 
 |  1469  | 
 |  1470   // Returns the amount of executable memory currently committed for the heap. | 
 |  1471   intptr_t CommittedMemoryExecutable(); | 
 |  1472  | 
 |  1473   // Returns the amount of physical memory currently committed for the heap. | 
 |  1474   size_t CommittedPhysicalMemory(); | 
 |  1475  | 
 |  1476   // Returns the maximum amount of memory ever committed for the heap. | 
 |  1477   intptr_t MaximumCommittedMemory() { return maximum_committed_; } | 
 |  1478  | 
 |  1479   // Updates the maximum committed memory for the heap. Should be called | 
 |  1480   // whenever a space grows. | 
 |  1481   void UpdateMaximumCommitted(); | 
 |  1482  | 
 |  1483   // Returns the available bytes in space w/o growing. | 
 |  1484   // Heap doesn't guarantee that it can allocate an object that requires | 
 |  1485   // all available bytes. Check MaxHeapObjectSize() instead. | 
 |  1486   intptr_t Available(); | 
 |  1487  | 
 |  1488   // Returns the size of all objects residing in the heap. | 
 |  1489   intptr_t SizeOfObjects(); | 
 |  1490  | 
 |  1491   void UpdateSurvivalStatistics(int start_new_space_size); | 
 |  1492  | 
 |  1493   inline void IncrementPromotedObjectsSize(int object_size) { | 
 |  1494     DCHECK(object_size > 0); | 
 |  1495     promoted_objects_size_ += object_size; | 
 |  1496   } | 
 |  1497   inline intptr_t promoted_objects_size() { return promoted_objects_size_; } | 
 |  1498  | 
 |  1499   inline void IncrementSemiSpaceCopiedObjectSize(int object_size) { | 
 |  1500     DCHECK(object_size > 0); | 
 |  1501     semi_space_copied_object_size_ += object_size; | 
 |  1502   } | 
 |  1503   inline intptr_t semi_space_copied_object_size() { | 
 |  1504     return semi_space_copied_object_size_; | 
 |  1505   } | 
 |  1506  | 
 |  1507  | 
 |  1508   inline intptr_t SurvivedNewSpaceObjectSize() { | 
 |  1509     return promoted_objects_size_ + semi_space_copied_object_size_; | 
 |  1510   } | 
 |  1511  | 
 |  1512   inline void IncrementNodesDiedInNewSpace() { nodes_died_in_new_space_++; } | 
 |  1513  | 
 |  1514   inline void IncrementNodesCopiedInNewSpace() { nodes_copied_in_new_space_++; } | 
 |  1515  | 
 |  1516   inline void IncrementNodesPromoted() { nodes_promoted_++; } | 
 |  1517  | 
 |  1518   inline void IncrementYoungSurvivorsCounter(int survived) { | 
 |  1519     DCHECK(survived >= 0); | 
 |  1520     survived_last_scavenge_ = survived; | 
 |  1521     survived_since_last_expansion_ += survived; | 
 |  1522   } | 
 |  1523  | 
 |  1524   inline intptr_t PromotedTotalSize() { | 
 |  1525     int64_t total = PromotedSpaceSizeOfObjects() + PromotedExternalMemorySize(); | 
 |  1526     if (total > std::numeric_limits<intptr_t>::max()) { | 
 |  1527       // TODO(erikcorry): Use uintptr_t everywhere we do heap size calculations. | 
 |  1528       return std::numeric_limits<intptr_t>::max(); | 
 |  1529     } | 
 |  1530     if (total < 0) return 0; | 
 |  1531     return static_cast<intptr_t>(total); | 
 |  1532   } | 
 |  1533  | 
 |  1534   inline intptr_t OldGenerationSpaceAvailable() { | 
 |  1535     return old_generation_allocation_limit_ - PromotedTotalSize(); | 
 |  1536   } | 
 |  1537  | 
 |  1538   inline intptr_t OldGenerationCapacityAvailable() { | 
 |  1539     return max_old_generation_size_ - PromotedTotalSize(); | 
 |  1540   } | 
 |  1541  | 
 |  1542  | 
 |  1543   void UpdateNewSpaceAllocationCounter() { | 
 |  1544     new_space_allocation_counter_ = NewSpaceAllocationCounter(); | 
 |  1545   } | 
 |  1546  | 
 |  1547   size_t NewSpaceAllocationCounter() { | 
 |  1548     return new_space_allocation_counter_ + new_space()->AllocatedSinceLastGC(); | 
 |  1549   } | 
 |  1550  | 
 |  1551   // This should be used only for testing. | 
 |  1552   void set_new_space_allocation_counter(size_t new_value) { | 
 |  1553     new_space_allocation_counter_ = new_value; | 
 |  1554   } | 
 |  1555  | 
 |  1556   void UpdateOldGenerationAllocationCounter() { | 
 |  1557     old_generation_allocation_counter_ = OldGenerationAllocationCounter(); | 
 |  1558   } | 
 |  1559  | 
 |  1560   size_t OldGenerationAllocationCounter() { | 
 |  1561     return old_generation_allocation_counter_ + PromotedSinceLastGC(); | 
 |  1562   } | 
 |  1563  | 
 |  1564   // This should be used only for testing. | 
 |  1565   void set_old_generation_allocation_counter(size_t new_value) { | 
 |  1566     old_generation_allocation_counter_ = new_value; | 
 |  1567   } | 
 |  1568  | 
 |  1569   size_t PromotedSinceLastGC() { | 
 |  1570     return PromotedSpaceSizeOfObjects() - old_generation_size_at_last_gc_; | 
 |  1571   } | 
 |  1572  | 
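Because the two allocation counters above only grow, allocation volume over an interval is just a difference of samples. A hypothetical sampling helper, not part of this header:

  // Sketch: measure bytes allocated since a previous sample.
  struct AllocationSample {
    size_t new_space;
    size_t old_generation;
  };

  AllocationSample TakeSample(Heap* heap) {
    return {heap->NewSpaceAllocationCounter(),
            heap->OldGenerationAllocationCounter()};
  }

  size_t AllocatedSince(Heap* heap, const AllocationSample& last) {
    return (heap->NewSpaceAllocationCounter() - last.new_space) +
           (heap->OldGenerationAllocationCounter() - last.old_generation);
  }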
 |  1573   // Update GC statistics that are tracked on the Heap. | 
 |  1574   void UpdateCumulativeGCStatistics(double duration, double spent_in_mutator, | 
 |  1575                                     double marking_time); | 
 |  1576  | 
 |  1577   // Returns maximum GC pause. | 
 |  1578   double get_max_gc_pause() { return max_gc_pause_; } | 
 |  1579  | 
 |  1580   // Returns maximum size of objects alive after GC. | 
 |  1581   intptr_t get_max_alive_after_gc() { return max_alive_after_gc_; } | 
 |  1582  | 
 |  1583   // Returns minimal interval between two subsequent collections. | 
 |  1584   double get_min_in_mutator() { return min_in_mutator_; } | 
 |  1585  | 
 |  1586   int gc_count() const { return gc_count_; } | 
 |  1587  | 
 |  1588   // Returns the size of objects residing in non new spaces. | 
 |  1589   intptr_t PromotedSpaceSizeOfObjects(); | 
 |  1590  | 
 |  1591   double total_regexp_code_generated() { return total_regexp_code_generated_; } | 
 |  1592   void IncreaseTotalRegexpCodeGenerated(int size) { | 
 |  1593     total_regexp_code_generated_ += size; | 
 |  1594   } | 
 |  1595  | 
 |  1596   void IncrementCodeGeneratedBytes(bool is_crankshafted, int size) { | 
 |  1597     if (is_crankshafted) { | 
 |  1598       crankshaft_codegen_bytes_generated_ += size; | 
 |  1599     } else { | 
 |  1600       full_codegen_bytes_generated_ += size; | 
 |  1601     } | 
 |  1602   } | 
 |  1603  | 
 |  1604   // =========================================================================== | 
 |  1605   // Prologue/epilogue callback methods.======================================== | 
 |  1606   // =========================================================================== | 
 |  1607  | 
 |  1608   void AddGCPrologueCallback(v8::Isolate::GCCallback callback, | 
 |  1609                              GCType gc_type_filter, bool pass_isolate = true); | 
 |  1610   void RemoveGCPrologueCallback(v8::Isolate::GCCallback callback); | 
 |  1611  | 
 |  1612   void AddGCEpilogueCallback(v8::Isolate::GCCallback callback, | 
 |  1613                              GCType gc_type_filter, bool pass_isolate = true); | 
 |  1614   void RemoveGCEpilogueCallback(v8::Isolate::GCCallback callback); | 
 |  1615  | 
 |  1616   void CallGCPrologueCallbacks(GCType gc_type, GCCallbackFlags flags); | 
 |  1617   void CallGCEpilogueCallbacks(GCType gc_type, GCCallbackFlags flags); | 
 |  1618  | 
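A sketch of registering a prologue callback through the methods above; the callback body is illustrative and its signature follows v8::Isolate::GCCallback.

  // Sketch: observe the start of every mark-compact collection.
  static void OnMarkCompactStart(v8::Isolate* isolate, GCType type,
                                 GCCallbackFlags flags) {
    // Embedder-side bookkeeping would go here.
  }

  void InstallGCObservers(Heap* heap) {
    heap->AddGCPrologueCallback(OnMarkCompactStart, kGCTypeMarkSweepCompact);
    // Later: heap->RemoveGCPrologueCallback(OnMarkCompactStart);
  }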
 |  1619   // =========================================================================== | 
 |  1620   // Allocation methods. ======================================================= | 
 |  1621   // =========================================================================== | 
 |  1622  | 
 |  1623   // Returns a deep copy of the JavaScript object. | 
 |  1624   // Properties and elements are copied too. | 
 |  1625   // Optionally takes an AllocationSite to be appended in an AllocationMemento. | 
 |  1626   MUST_USE_RESULT AllocationResult CopyJSObject(JSObject* source, | 
 |  1627                                                 AllocationSite* site = NULL); | 
 |  1628  | 
 |  1629   // Creates a filler object and returns a heap object immediately after it. | 
 |  1630   MUST_USE_RESULT HeapObject* PrecedeWithFiller(HeapObject* object, | 
 |  1631                                                 int filler_size); | 
 |  1632   // Creates a filler object if needed for alignment and returns a heap object | 
 |  1633   // immediately after it. If any space is left after the returned object, | 
 |  1634   // another filler object is created so the over-allocated memory is iterable. | 
 |  1635   MUST_USE_RESULT HeapObject* AlignWithFiller(HeapObject* object, | 
 |  1636                                               int object_size, | 
 |  1637                                               int allocation_size, | 
 |  1638                                               AllocationAlignment alignment); | 
 |  1639  | 
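The filler helpers above exist so that over-allocated memory stays iterable. The sketch below shows the typical arithmetic around AlignWithFiller; |raw| stands in for the result of a raw allocation of allocation_size bytes, and kDoubleAligned/kPointerSize are the usual V8 constants.

  // Sketch: restore double alignment for an object allocated with one extra
  // word of slack; any unused word becomes a filler so heap iteration still
  // sees a contiguous sequence of objects.
  HeapObject* MakeDoubleAligned(Heap* heap, HeapObject* raw, int object_size) {
    int allocation_size = object_size + kPointerSize;
    return heap->AlignWithFiller(raw, object_size, allocation_size,
                                 kDoubleAligned);
  }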
 |  1640 // ============================================================================= | 
 |  1641  | 
 |  1642 #ifdef VERIFY_HEAP | 
 |  1643   // Verify the heap is in its normal state before or after a GC. | 
 |  1644   void Verify(); | 
 |  1645 #endif | 
 |  1646  | 
 |  1647 #ifdef DEBUG | 
 |  1648   void set_allocation_timeout(int timeout) { allocation_timeout_ = timeout; } | 
 |  1649  | 
 |  1650   void TracePathToObjectFrom(Object* target, Object* root); | 
 |  1651   void TracePathToObject(Object* target); | 
 |  1652   void TracePathToGlobal(); | 
 |  1653  | 
 |  1654   void Print(); | 
 |  1655   void PrintHandles(); | 
 |  1656  | 
 |  1657   // Report heap statistics. | 
 |  1658   void ReportHeapStatistics(const char* title); | 
 |  1659   void ReportCodeStatistics(const char* title); | 
 |  1660 #endif | 
 |  1661  | 
|  1602  private: |  1662  private: | 
|  1603   static const int kInitialStringTableSize = 2048; |  1663   struct StrongRootsList; | 
|  1604   static const int kInitialEvalCacheSize = 64; |  | 
|  1605   static const int kInitialNumberStringCacheSize = 256; |  | 
|  1606  |  | 
|  1607   Heap(); |  | 
|  1608  |  | 
|  1609   int current_gc_flags() { return current_gc_flags_; } |  | 
|  1610   void set_current_gc_flags(int flags) { |  | 
|  1611     current_gc_flags_ = flags; |  | 
|  1612     DCHECK(!ShouldFinalizeIncrementalMarking() || |  | 
|  1613            !ShouldAbortIncrementalMarking()); |  | 
|  1614   } |  | 
|  1615  |  | 
|  1616   inline bool ShouldReduceMemory() const { |  | 
|  1617     return current_gc_flags_ & kReduceMemoryFootprintMask; |  | 
|  1618   } |  | 
|  1619  |  | 
|  1620   inline bool ShouldAbortIncrementalMarking() const { |  | 
|  1621     return current_gc_flags_ & kAbortIncrementalMarkingMask; |  | 
|  1622   } |  | 
|  1623  |  | 
|  1624   inline bool ShouldFinalizeIncrementalMarking() const { |  | 
|  1625     return current_gc_flags_ & kFinalizeIncrementalMarkingMask; |  | 
|  1626   } |  | 
|  1627  |  | 
|  1628   // Allocates a JS Map in the heap. |  | 
|  1629   MUST_USE_RESULT AllocationResult |  | 
|  1630       AllocateMap(InstanceType instance_type, int instance_size, |  | 
|  1631                   ElementsKind elements_kind = TERMINAL_FAST_ELEMENTS_KIND); |  | 
|  1632  |  | 
|  1633   // Allocates and initializes a new JavaScript object based on a |  | 
|  1634   // constructor. |  | 
|  1635   // If allocation_site is non-null, then a memento is emitted after the object |  | 
|  1636   // that points to the site. |  | 
|  1637   MUST_USE_RESULT AllocationResult |  | 
|  1638       AllocateJSObject(JSFunction* constructor, |  | 
|  1639                        PretenureFlag pretenure = NOT_TENURED, |  | 
|  1640                        AllocationSite* allocation_site = NULL); |  | 
|  1641  |  | 
|  1642   // Allocates and initializes a new JavaScript object based on a map. |  | 
|  1643   // Passing an allocation site means that a memento will be created that |  | 
|  1644   // points to the site. |  | 
|  1645   MUST_USE_RESULT AllocationResult |  | 
|  1646       AllocateJSObjectFromMap(Map* map, PretenureFlag pretenure = NOT_TENURED, |  | 
|  1647                               AllocationSite* allocation_site = NULL); |  | 
|  1648  |  | 
|  1649   // Allocates a HeapNumber from value. |  | 
|  1650   MUST_USE_RESULT AllocationResult |  | 
|  1651       AllocateHeapNumber(double value, MutableMode mode = IMMUTABLE, |  | 
|  1652                          PretenureFlag pretenure = NOT_TENURED); |  | 
|  1653  |  | 
|  1654 // Allocates SIMD values from the given lane values. |  | 
|  1655 #define SIMD_ALLOCATE_DECLARATION(TYPE, Type, type, lane_count, lane_type) \ |  | 
|  1656   AllocationResult Allocate##Type(lane_type lanes[lane_count],             \ |  | 
|  1657                                   PretenureFlag pretenure = NOT_TENURED); |  | 
|  1658   SIMD128_TYPES(SIMD_ALLOCATE_DECLARATION) |  | 
|  1659 #undef SIMD_ALLOCATE_DECLARATION |  | 
|  1660  |  | 
|  1661   // Allocates a byte array of the specified length |  | 
|  1662   MUST_USE_RESULT AllocationResult |  | 
|  1663       AllocateByteArray(int length, PretenureFlag pretenure = NOT_TENURED); |  | 
|  1664  |  | 
|  1665   // Allocates a bytecode array with given contents. |  | 
|  1666   MUST_USE_RESULT AllocationResult |  | 
|  1667       AllocateBytecodeArray(int length, const byte* raw_bytecodes, |  | 
|  1668                             int frame_size); |  | 
|  1669  |  | 
|  1670   // Copy the code and scope info part of the code object, but insert |  | 
|  1671   // the provided data as the relocation information. |  | 
|  1672   MUST_USE_RESULT AllocationResult |  | 
|  1673       CopyCode(Code* code, Vector<byte> reloc_info); |  | 
|  1674  |  | 
|  1675   MUST_USE_RESULT AllocationResult CopyCode(Code* code); |  | 
|  1676  |  | 
|  1677   // Allocates a fixed array initialized with undefined values |  | 
|  1678   MUST_USE_RESULT AllocationResult |  | 
|  1679       AllocateFixedArray(int length, PretenureFlag pretenure = NOT_TENURED); |  | 
|  1680  |  | 
|  1681   // The amount of external memory registered through the API kept alive |  | 
|  1682   // by global handles |  | 
|  1683   int64_t amount_of_external_allocated_memory_; |  | 
|  1684  |  | 
|  1685   // Caches the amount of external memory registered at the last global gc. |  | 
|  1686   int64_t amount_of_external_allocated_memory_at_last_global_gc_; |  | 
|  1687  |  | 
|  1688   // This can be calculated directly from a pointer to the heap; however, it is |  | 
|  1689   // more expedient to get at the isolate directly from within Heap methods. |  | 
|  1690   Isolate* isolate_; |  | 
|  1691  |  | 
|  1692   Object* roots_[kRootListLength]; |  | 
|  1693  |  | 
|  1694   size_t code_range_size_; |  | 
|  1695   int reserved_semispace_size_; |  | 
|  1696   int max_semi_space_size_; |  | 
|  1697   int initial_semispace_size_; |  | 
|  1698   int target_semispace_size_; |  | 
|  1699   intptr_t max_old_generation_size_; |  | 
|  1700   intptr_t initial_old_generation_size_; |  | 
|  1701   bool old_generation_size_configured_; |  | 
|  1702   intptr_t max_executable_size_; |  | 
|  1703   intptr_t maximum_committed_; |  | 
|  1704  |  | 
|  1705   // For keeping track of how much data has survived |  | 
|  1706   // scavenge since last new space expansion. |  | 
|  1707   int survived_since_last_expansion_; |  | 
|  1708  |  | 
|  1709   // ... and since the last scavenge. |  | 
|  1710   int survived_last_scavenge_; |  | 
|  1711  |  | 
|  1712   int always_allocate_scope_depth_; |  | 
|  1713  |  | 
|  1714   // For keeping track of context disposals. |  | 
|  1715   int contexts_disposed_; |  | 
|  1716  |  | 
|  1717   int global_ic_age_; |  | 
|  1718  |  | 
|  1719   int scan_on_scavenge_pages_; |  | 
|  1720  |  | 
|  1721   NewSpace new_space_; |  | 
|  1722   OldSpace* old_space_; |  | 
|  1723   OldSpace* code_space_; |  | 
|  1724   MapSpace* map_space_; |  | 
|  1725   LargeObjectSpace* lo_space_; |  | 
|  1726   HeapState gc_state_; |  | 
|  1727   int gc_post_processing_depth_; |  | 
|  1728   Address new_space_top_after_last_gc_; |  | 
|  1729  |  | 
|  1730   // Returns the amount of external memory registered since last global gc. |  | 
|  1731   int64_t PromotedExternalMemorySize(); |  | 
|  1732  |  | 
|  1733   // How many "runtime allocations" happened. |  | 
|  1734   uint32_t allocations_count_; |  | 
|  1735  |  | 
|  1736   // Running hash over allocations performed. |  | 
|  1737   uint32_t raw_allocations_hash_; |  | 
|  1738  |  | 
|  1739   // Countdown counter, dumps allocation hash when 0. |  | 
|  1740   uint32_t dump_allocations_hash_countdown_; |  | 
|  1741  |  | 
|  1742   // How many mark-sweep collections happened. |  | 
|  1743   unsigned int ms_count_; |  | 
|  1744  |  | 
|  1745   // How many GCs have happened. |  | 
|  1746   unsigned int gc_count_; |  | 
|  1747  |  | 
|  1748   // For post mortem debugging. |  | 
|  1749   static const int kRememberedUnmappedPages = 128; |  | 
|  1750   int remembered_unmapped_pages_index_; |  | 
|  1751   Address remembered_unmapped_pages_[kRememberedUnmappedPages]; |  | 
|  1752  |  | 
|  1753 #define ROOT_ACCESSOR(type, name, camel_name) \ |  | 
|  1754   inline void set_##name(type* value); |  | 
|  1755   ROOT_LIST(ROOT_ACCESSOR) |  | 
|  1756 #undef ROOT_ACCESSOR |  | 
|  1757  |  | 
|  1758 #ifdef DEBUG |  | 
|  1759   // If the --gc-interval flag is set to a positive value, this |  | 
|  1760   // variable holds the value indicating the number of allocations |  | 
|  1761   // remaining until the next failure and garbage collection. |  | 
|  1762   int allocation_timeout_; |  | 
|  1763 #endif  // DEBUG |  | 
|  1764  |  | 
|  1765   // Limit that triggers a global GC on the next (normally caused) GC.  This |  | 
|  1766   // is checked when we have already decided to do a GC to help determine |  | 
|  1767   // which collector to invoke, before expanding a paged space in the old |  | 
|  1768   // generation and on every allocation in large object space. |  | 
|  1769   intptr_t old_generation_allocation_limit_; |  | 
|  1770  |  | 
|  1771   // Indicates that an allocation has failed in the old generation since the |  | 
|  1772   // last GC. |  | 
|  1773   bool old_gen_exhausted_; |  | 
|  1774  |  | 
|  1775   // Indicates that memory usage is more important than latency. |  | 
|  1776   // TODO(ulan): Merge it with memory reducer once chromium:490559 is fixed. |  | 
|  1777   bool optimize_for_memory_usage_; |  | 
|  1778  |  | 
|  1779   // Indicates that inline bump-pointer allocation has been globally disabled |  | 
|  1780   // for all spaces. This is used to disable allocations in generated code. |  | 
|  1781   bool inline_allocation_disabled_; |  | 
|  1782  |  | 
|  1783   // Weak list heads, threaded through the objects. |  | 
|  1784   // List heads are initialized lazily and contain the undefined_value at start. |  | 
|  1785   Object* native_contexts_list_; |  | 
|  1786   Object* allocation_sites_list_; |  | 
|  1787  |  | 
|  1788   // List of encountered weak collections (JSWeakMap and JSWeakSet) during |  | 
|  1789   // marking. It is initialized during marking, destroyed after marking and |  | 
|  1790   // contains Smi(0) while marking is not active. |  | 
|  1791   Object* encountered_weak_collections_; |  | 
|  1792  |  | 
|  1793   Object* encountered_weak_cells_; |  | 
|  1794  |  | 
|  1795   StoreBufferRebuilder store_buffer_rebuilder_; |  | 
|  1796  |  1664  | 
|  1797   struct StringTypeTable { |  1665   struct StringTypeTable { | 
|  1798     InstanceType type; |  1666     InstanceType type; | 
|  1799     int size; |  1667     int size; | 
|  1800     RootListIndex index; |  1668     RootListIndex index; | 
|  1801   }; |  1669   }; | 
|  1802  |  1670  | 
|  1803   struct ConstantStringTable { |  1671   struct ConstantStringTable { | 
|  1804     const char* contents; |  1672     const char* contents; | 
|  1805     RootListIndex index; |  1673     RootListIndex index; | 
|  1806   }; |  1674   }; | 
|  1807  |  1675  | 
|  1808   struct StructTable { |  1676   struct StructTable { | 
|  1809     InstanceType type; |  1677     InstanceType type; | 
|  1810     int size; |  1678     int size; | 
|  1811     RootListIndex index; |  1679     RootListIndex index; | 
|  1812   }; |  1680   }; | 
|  1813  |  1681  | 
|  1814   static const StringTypeTable string_type_table[]; |  | 
|  1815   static const ConstantStringTable constant_string_table[]; |  | 
|  1816   static const StructTable struct_table[]; |  | 
|  1817  |  | 
|  1818   struct GCCallbackPair { |  1682   struct GCCallbackPair { | 
|  1819     GCCallbackPair(v8::Isolate::GCCallback callback, GCType gc_type, |  1683     GCCallbackPair(v8::Isolate::GCCallback callback, GCType gc_type, | 
|  1820                    bool pass_isolate) |  1684                    bool pass_isolate) | 
|  1821         : callback(callback), gc_type(gc_type), pass_isolate(pass_isolate) {} |  1685         : callback(callback), gc_type(gc_type), pass_isolate(pass_isolate) {} | 
|  1822  |  1686  | 
|  1823     bool operator==(const GCCallbackPair& other) const { |  1687     bool operator==(const GCCallbackPair& other) const { | 
|  1824       return other.callback == callback; |  1688       return other.callback == callback; | 
|  1825     } |  1689     } | 
|  1826  |  1690  | 
|  1827     v8::Isolate::GCCallback callback; |  1691     v8::Isolate::GCCallback callback; | 
|  1828     GCType gc_type; |  1692     GCType gc_type; | 
|  1829     bool pass_isolate; |  1693     bool pass_isolate; | 
|  1830   }; |  1694   }; | 
|  1831  |  1695  | 
|  1832   List<GCCallbackPair> gc_epilogue_callbacks_; |  1696   static const int kInitialStringTableSize = 2048; | 
|  1833   List<GCCallbackPair> gc_prologue_callbacks_; |  1697   static const int kInitialEvalCacheSize = 64; | 
 |  1698   static const int kInitialNumberStringCacheSize = 256; | 
 |  1699  | 
 |  1700   static const int kRememberedUnmappedPages = 128; | 
 |  1701  | 
 |  1702   static const StringTypeTable string_type_table[]; | 
 |  1703   static const ConstantStringTable constant_string_table[]; | 
 |  1704   static const StructTable struct_table[]; | 
 |  1705  | 
 |  1706   static const int kYoungSurvivalRateHighThreshold = 90; | 
 |  1707   static const int kYoungSurvivalRateAllowedDeviation = 15; | 
 |  1708   static const int kOldSurvivalRateLowThreshold = 10; | 
 |  1709  | 
 |  1710   static const int kMaxMarkCompactsInIdleRound = 7; | 
 |  1711   static const int kIdleScavengeThreshold = 5; | 
 |  1712  | 
 |  1713   static const int kAllocationSiteScratchpadSize = 256; | 
 |  1714  | 
 |  1715   Heap(); | 
 |  1716  | 
 |  1717   static String* UpdateNewSpaceReferenceInExternalStringTableEntry( | 
 |  1718       Heap* heap, Object** pointer); | 
 |  1719  | 
 |  1720   static void ScavengeStoreBufferCallback(Heap* heap, MemoryChunk* page, | 
 |  1721                                           StoreBufferEvent event); | 
 |  1722  | 
 |  1723   // Selects the proper allocation space depending on the given object | 
 |  1724   // size and pretenuring decision. | 
 |  1725   static AllocationSpace SelectSpace(int object_size, PretenureFlag pretenure) { | 
 |  1726     if (object_size > Page::kMaxRegularHeapObjectSize) return LO_SPACE; | 
 |  1727     return (pretenure == TENURED) ? OLD_SPACE : NEW_SPACE; | 
 |  1728   } | 
 |  1729  | 
 |  1730   int current_gc_flags() { return current_gc_flags_; } | 
 |  1731  | 
 |  1732   void set_current_gc_flags(int flags) { | 
 |  1733     current_gc_flags_ = flags; | 
 |  1734     DCHECK(!ShouldFinalizeIncrementalMarking() || | 
 |  1735            !ShouldAbortIncrementalMarking()); | 
 |  1736   } | 
 |  1737  | 
 |  1738   inline bool ShouldReduceMemory() const { | 
 |  1739     return current_gc_flags_ & kReduceMemoryFootprintMask; | 
 |  1740   } | 
 |  1741  | 
 |  1742   inline bool ShouldAbortIncrementalMarking() const { | 
 |  1743     return current_gc_flags_ & kAbortIncrementalMarkingMask; | 
 |  1744   } | 
 |  1745  | 
 |  1746   inline bool ShouldFinalizeIncrementalMarking() const { | 
 |  1747     return current_gc_flags_ & kFinalizeIncrementalMarkingMask; | 
 |  1748   } | 
 |  1749  | 
 |  1750 #define ROOT_ACCESSOR(type, name, camel_name) \ | 
 |  1751   inline void set_##name(type* value); | 
 |  1752   ROOT_LIST(ROOT_ACCESSOR) | 
 |  1753 #undef ROOT_ACCESSOR | 
|  1834  |  1754  | 
|  1835   // Code that should be run before and after each GC.  Includes some |  1755   // Code that should be run before and after each GC.  Includes some | 
|  1836   // reporting/verification activities when compiled with DEBUG set. |  1756   // reporting/verification activities when compiled with DEBUG set. | 
|  1837   void GarbageCollectionPrologue(); |  1757   void GarbageCollectionPrologue(); | 
|  1838   void GarbageCollectionEpilogue(); |  1758   void GarbageCollectionEpilogue(); | 
|  1839  |  1759  | 
|  1840   void PreprocessStackTraces(); |  1760   void PreprocessStackTraces(); | 
|  1841  |  1761  | 
|  1842   // Pretenuring decisions are made based on feedback collected during new |  1762   // Pretenuring decisions are made based on feedback collected during new | 
|  1843   // space evacuation. Note that between feedback collection and calling this |  1763   // space evacuation. Note that between feedback collection and calling this | 
| (...skipping 24 matching lines...) | 
|  1868  |  1788  | 
|  1869   // Performs garbage collection |  1789   // Performs garbage collection | 
|  1870   // Returns whether there is a chance another major GC could |  1790   // Returns whether there is a chance another major GC could | 
|  1871   // collect more garbage. |  1791   // collect more garbage. | 
|  1872   bool PerformGarbageCollection( |  1792   bool PerformGarbageCollection( | 
|  1873       GarbageCollector collector, |  1793       GarbageCollector collector, | 
|  1874       const GCCallbackFlags gc_callback_flags = kNoGCCallbackFlags); |  1794       const GCCallbackFlags gc_callback_flags = kNoGCCallbackFlags); | 
|  1875  |  1795  | 
|  1876   inline void UpdateOldSpaceLimits(); |  1796   inline void UpdateOldSpaceLimits(); | 
|  1877  |  1797  | 
|  1878   // Selects the proper allocation space depending on the given object |  1798   // Initializes a JSObject based on its map. | 
|  1879   // size and pretenuring decision. |  1799   void InitializeJSObjectFromMap(JSObject* obj, FixedArray* properties, | 
|  1880   static AllocationSpace SelectSpace(int object_size, |  1800                                  Map* map); | 
|  1881                                      PretenureFlag pretenure) { |  1801   void InitializeAllocationMemento(AllocationMemento* memento, | 
|  1882     if (object_size > Page::kMaxRegularHeapObjectSize) return LO_SPACE; |  1802                                    AllocationSite* allocation_site); | 
|  1883     return (pretenure == TENURED) ? OLD_SPACE : NEW_SPACE; |  1803  | 
|  1884   } |  1804   bool CreateInitialMaps(); | 
 |  1805   void CreateInitialObjects(); | 
 |  1806  | 
 |  1807   // These Create*EntryStub functions are here and forced to not be inlined | 
 |  1808   // because of a gcc-4.4 bug that assigns wrong vtable entries. | 
 |  1809   NO_INLINE(void CreateJSEntryStub()); | 
 |  1810   NO_INLINE(void CreateJSConstructEntryStub()); | 
 |  1811  | 
 |  1812   void CreateFixedStubs(); | 
|  1885  |  1813  | 
|  1886   HeapObject* DoubleAlignForDeserialization(HeapObject* object, int size); |  1814   HeapObject* DoubleAlignForDeserialization(HeapObject* object, int size); | 
|  1887  |  1815  | 
 |  1816   // Performs a minor collection in new generation. | 
 |  1817   void Scavenge(); | 
 |  1818  | 
 |  1819   // Commits from space if it is uncommitted. | 
 |  1820   void EnsureFromSpaceIsCommitted(); | 
 |  1821  | 
 |  1822   // Uncommit unused semi space. | 
 |  1823   bool UncommitFromSpace() { return new_space_.UncommitFromSpace(); } | 
 |  1824  | 
 |  1825   // Fill in bogus values in from space | 
 |  1826   void ZapFromSpace(); | 
 |  1827  | 
 |  1828   Address DoScavenge(ObjectVisitor* scavenge_visitor, Address new_space_front); | 
 |  1829  | 
 |  1830   // Performs a major collection in the whole heap. | 
 |  1831   void MarkCompact(); | 
 |  1832  | 
 |  1833   // Code to be run before and after mark-compact. | 
 |  1834   void MarkCompactPrologue(); | 
 |  1835   void MarkCompactEpilogue(); | 
 |  1836  | 
 |  1837   void ProcessNativeContexts(WeakObjectRetainer* retainer); | 
 |  1838   void ProcessAllocationSites(WeakObjectRetainer* retainer); | 
 |  1839  | 
 |  1840   // Deopts all code that contains allocation instructions which are tenured or | 
 |  1841   // not tenured. Moreover, it clears the pretenuring allocation site statistics. | 
 |  1842   void ResetAllAllocationSitesDependentCode(PretenureFlag flag); | 
 |  1843  | 
 |  1844   // Evaluates local pretenuring for the old space and calls | 
 |  1845   // ResetAllTenuredAllocationSitesDependentCode if too many objects died in | 
 |  1846   // the old space. | 
 |  1847   void EvaluateOldSpaceLocalPretenuring(uint64_t size_of_objects_before_gc); | 
 |  1848  | 
 |  1849   // Called on heap tear-down. Frees all remaining ArrayBuffer backing stores. | 
 |  1850   void TearDownArrayBuffers(); | 
 |  1851  | 
 |  1852   // These correspond to the non-Helper versions. | 
 |  1853   void RegisterNewArrayBufferHelper(std::map<void*, size_t>& live_buffers, | 
 |  1854                                     void* data, size_t length); | 
 |  1855   void UnregisterArrayBufferHelper( | 
 |  1856       std::map<void*, size_t>& live_buffers, | 
 |  1857       std::map<void*, size_t>& not_yet_discovered_buffers, void* data); | 
 |  1858   void RegisterLiveArrayBufferHelper( | 
 |  1859       std::map<void*, size_t>& not_yet_discovered_buffers, void* data); | 
 |  1860   size_t FreeDeadArrayBuffersHelper( | 
 |  1861       Isolate* isolate, std::map<void*, size_t>& live_buffers, | 
 |  1862       std::map<void*, size_t>& not_yet_discovered_buffers); | 
 |  1863   void TearDownArrayBuffersHelper( | 
 |  1864       Isolate* isolate, std::map<void*, size_t>& live_buffers, | 
 |  1865       std::map<void*, size_t>& not_yet_discovered_buffers); | 
 |  1866  | 
 |  1867   // Record statistics before and after garbage collection. | 
 |  1868   void ReportStatisticsBeforeGC(); | 
 |  1869   void ReportStatisticsAfterGC(); | 
 |  1870  | 
 |  1871   // Creates and installs the full-sized number string cache. | 
 |  1872   int FullSizeNumberStringCacheLength(); | 
 |  1873   // Flush the number to string cache. | 
 |  1874   void FlushNumberStringCache(); | 
 |  1875  | 
 |  1876   // Sets used allocation sites entries to undefined. | 
 |  1877   void FlushAllocationSitesScratchpad(); | 
 |  1878  | 
 |  1879   // Initializes the allocation sites scratchpad with undefined values. | 
 |  1880   void InitializeAllocationSitesScratchpad(); | 
 |  1881  | 
 |  1882   // Adds an allocation site to the scratchpad if there is space left. | 
 |  1883   void AddAllocationSiteToScratchpad(AllocationSite* site, | 
 |  1884                                      ScratchpadSlotMode mode); | 
 |  1885  | 
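Editor's note: a rough sketch of what AddAllocationSiteToScratchpad is expected to do, given the bounded scratchpad fields that appear further down (allocation_sites_scratchpad_length_ and kAllocationSiteScratchpadSize); the body is an assumption about heap.cc, not a quote of it:

    // Sketch: record the site only while the fixed-size scratchpad has room.
    void Heap::AddAllocationSiteToScratchpad(AllocationSite* site,
                                             ScratchpadSlotMode mode) {
      if (allocation_sites_scratchpad_length_ >= kAllocationSiteScratchpadSize) {
        return;  // Scratchpad full; the site is simply not recorded.
      }
      // ...store |site| at slot allocation_sites_scratchpad_length_, honoring
      // |mode| for the write barrier...
      allocation_sites_scratchpad_length_++;
    }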
 |  1886   // TODO(hpayer): Allocation site pretenuring may make this method obsolete. | 
 |  1887   // Re-visit incremental marking heuristics. | 
 |  1888   bool IsHighSurvivalRate() { return high_survival_rate_period_length_ > 0; } | 
 |  1889  | 
 |  1890   void ConfigureInitialOldGenerationSize(); | 
 |  1891  | 
 |  1892   void SelectScavengingVisitorsTable(); | 
 |  1893  | 
 |  1894   bool HasLowYoungGenerationAllocationRate(); | 
 |  1895   bool HasLowOldGenerationAllocationRate(); | 
 |  1896   double YoungGenerationMutatorUtilization(); | 
 |  1897   double OldGenerationMutatorUtilization(); | 
 |  1898  | 
 |  1899   void ReduceNewSpaceSize(); | 
 |  1900  | 
 |  1901   bool TryFinalizeIdleIncrementalMarking( | 
 |  1902       double idle_time_in_ms, size_t size_of_objects, | 
 |  1903       size_t mark_compact_speed_in_bytes_per_ms); | 
 |  1904  | 
 |  1905   GCIdleTimeHandler::HeapState ComputeHeapState(); | 
 |  1906  | 
 |  1907   bool PerformIdleTimeAction(GCIdleTimeAction action, | 
 |  1908                              GCIdleTimeHandler::HeapState heap_state, | 
 |  1909                              double deadline_in_ms); | 
 |  1910  | 
 |  1911   void IdleNotificationEpilogue(GCIdleTimeAction action, | 
 |  1912                                 GCIdleTimeHandler::HeapState heap_state, | 
 |  1913                                 double start_ms, double deadline_in_ms); | 
 |  1914   void CheckAndNotifyBackgroundIdleNotification(double idle_time_in_ms, | 
 |  1915                                                 double now_ms); | 
 |  1916  | 
 |  1917   void ClearObjectStats(bool clear_last_time_stats = false); | 
 |  1918  | 
 |  1919   inline void UpdateAllocationsHash(HeapObject* object); | 
 |  1920   inline void UpdateAllocationsHash(uint32_t value); | 
 |  1921   inline void PrintAllocationsHash(); | 
 |  1922  | 
 |  1923   void AddToRingBuffer(const char* string); | 
 |  1924   void GetFromRingBuffer(char* buffer); | 
 |  1925  | 
 |  1926   // =========================================================================== | 
 |  1927   // Allocation methods. ======================================================= | 
 |  1928   // =========================================================================== | 
 |  1929  | 
 |  1930   // Allocates a JS Map in the heap. | 
 |  1931   MUST_USE_RESULT AllocationResult | 
 |  1932   AllocateMap(InstanceType instance_type, int instance_size, | 
 |  1933               ElementsKind elements_kind = TERMINAL_FAST_ELEMENTS_KIND); | 
 |  1934  | 
 |  1935   // Allocates and initializes a new JavaScript object based on a | 
 |  1936   // constructor. | 
 |  1937   // If allocation_site is non-null, then a memento is emitted after the object | 
 |  1938   // that points to the site. | 
 |  1939   MUST_USE_RESULT AllocationResult AllocateJSObject( | 
 |  1940       JSFunction* constructor, PretenureFlag pretenure = NOT_TENURED, | 
 |  1941       AllocationSite* allocation_site = NULL); | 
 |  1942  | 
 |  1943   // Allocates and initializes a new JavaScript object based on a map. | 
 |  1944   // Passing an allocation site means that a memento will be created that | 
 |  1945   // points to the site. | 
 |  1946   MUST_USE_RESULT AllocationResult | 
 |  1947   AllocateJSObjectFromMap(Map* map, PretenureFlag pretenure = NOT_TENURED, | 
 |  1948                           AllocationSite* allocation_site = NULL); | 
 |  1949  | 
 |  1950   // Allocates a HeapNumber from value. | 
 |  1951   MUST_USE_RESULT AllocationResult | 
 |  1952   AllocateHeapNumber(double value, MutableMode mode = IMMUTABLE, | 
 |  1953                      PretenureFlag pretenure = NOT_TENURED); | 
 |  1954  | 
 |  1955 // Allocates SIMD values from the given lane values. | 
 |  1956 #define SIMD_ALLOCATE_DECLARATION(TYPE, Type, type, lane_count, lane_type) \ | 
 |  1957   AllocationResult Allocate##Type(lane_type lanes[lane_count],             \ | 
 |  1958                                   PretenureFlag pretenure = NOT_TENURED); | 
 |  1959   SIMD128_TYPES(SIMD_ALLOCATE_DECLARATION) | 
 |  1960 #undef SIMD_ALLOCATE_DECLARATION | 
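Editor's note: SIMD128_TYPES is another X-macro; for a lane configuration such as (FLOAT32X4, Float32x4, float32x4, 4, float) the declaration generated above would read:

    // Illustrative expansion for one SIMD type (lane count and lane type here
    // are assumptions about the SIMD128_TYPES entry).
    AllocationResult AllocateFloat32x4(float lanes[4],
                                       PretenureFlag pretenure = NOT_TENURED);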
 |  1961  | 
 |  1962   // Allocates a byte array of the specified length | 
 |  1963   MUST_USE_RESULT AllocationResult | 
 |  1964   AllocateByteArray(int length, PretenureFlag pretenure = NOT_TENURED); | 
 |  1965  | 
 |  1966   // Allocates a bytecode array with given contents. | 
 |  1967   MUST_USE_RESULT AllocationResult | 
 |  1968   AllocateBytecodeArray(int length, const byte* raw_bytecodes, int frame_size); | 
 |  1969  | 
 |  1970   // Copy the code and scope info part of the code object, but insert | 
 |  1971   // the provided data as the relocation information. | 
 |  1972   MUST_USE_RESULT AllocationResult CopyCode(Code* code, | 
 |  1973                                             Vector<byte> reloc_info); | 
 |  1974  | 
 |  1975   MUST_USE_RESULT AllocationResult CopyCode(Code* code); | 
 |  1976  | 
 |  1977   // Allocates a fixed array initialized with undefined values | 
 |  1978   MUST_USE_RESULT AllocationResult | 
 |  1979   AllocateFixedArray(int length, PretenureFlag pretenure = NOT_TENURED); | 
 |  1980  | 
|  1888   // Allocate an uninitialized object.  The memory is non-executable if the |  1981   // Allocate an uninitialized object.  The memory is non-executable if the | 
|  1889   // hardware and OS allow.  This is the single choke-point for allocations |  1982   // hardware and OS allow.  This is the single choke-point for allocations | 
|  1890   // performed by the runtime and should not be bypassed (to extend this to |  1983   // performed by the runtime and should not be bypassed (to extend this to | 
|  1891   // inlined allocations, use the Heap::DisableInlineAllocation() support). |  1984   // inlined allocations, use the Heap::DisableInlineAllocation() support). | 
|  1892   MUST_USE_RESULT inline AllocationResult AllocateRaw( |  1985   MUST_USE_RESULT inline AllocationResult AllocateRaw( | 
|  1893       int size_in_bytes, AllocationSpace space, AllocationSpace retry_space, |  1986       int size_in_bytes, AllocationSpace space, AllocationSpace retry_space, | 
|  1894       AllocationAlignment alignment = kWordAligned); |  1987       AllocationAlignment alignment = kWordAligned); | 
|  1895  |  1988  | 
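Editor's note: most of the typed Allocate* methods in this section bottom out in AllocateRaw. A hedged sketch of that pattern, with a hypothetical helper name and simplified error handling, to show how the space/retry-space pair is used:

    // Sketch only (hypothetical helper, not the actual heap.cc code): allocate
    // in new space and let AllocateRaw retry in old space when new space is full.
    AllocationResult AllocateExampleObject(Heap* heap, Map* map, int size) {
      HeapObject* result = NULL;
      AllocationResult allocation =
          heap->AllocateRaw(size, NEW_SPACE, OLD_SPACE, kWordAligned);
      if (!allocation.To(&result)) return allocation;  // Propagate the failure.
      result->set_map_no_write_barrier(map);
      return result;
    }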
|  1896   // Allocates a heap object based on the map. |  1989   // Allocates a heap object based on the map. | 
|  1897   MUST_USE_RESULT AllocationResult |  1990   MUST_USE_RESULT AllocationResult | 
|  1898       Allocate(Map* map, AllocationSpace space, |  1991       Allocate(Map* map, AllocationSpace space, | 
|  1899                AllocationSite* allocation_site = NULL); |  1992                AllocationSite* allocation_site = NULL); | 
|  1900  |  1993  | 
|  1901   // Allocates a partial map for bootstrapping. |  1994   // Allocates a partial map for bootstrapping. | 
|  1902   MUST_USE_RESULT AllocationResult |  1995   MUST_USE_RESULT AllocationResult | 
|  1903       AllocatePartialMap(InstanceType instance_type, int instance_size); |  1996       AllocatePartialMap(InstanceType instance_type, int instance_size); | 
|  1904  |  1997  | 
|  1905   // Initializes a JSObject based on its map. |  | 
|  1906   void InitializeJSObjectFromMap(JSObject* obj, FixedArray* properties, |  | 
|  1907                                  Map* map); |  | 
|  1908   void InitializeAllocationMemento(AllocationMemento* memento, |  | 
|  1909                                    AllocationSite* allocation_site); |  | 
|  1910  |  | 
|  1911   // Allocate a block of memory in the given space (filled with a filler). |  1998   // Allocate a block of memory in the given space (filled with a filler). | 
|  1912   // Used as a fall-back for generated code when the space is full. |  1999   // Used as a fall-back for generated code when the space is full. | 
|  1913   MUST_USE_RESULT AllocationResult |  2000   MUST_USE_RESULT AllocationResult | 
|  1914       AllocateFillerObject(int size, bool double_align, AllocationSpace space); |  2001       AllocateFillerObject(int size, bool double_align, AllocationSpace space); | 
|  1915  |  2002  | 
|  1916   // Allocate an uninitialized fixed array. |  2003   // Allocate an uninitialized fixed array. | 
|  1917   MUST_USE_RESULT AllocationResult |  2004   MUST_USE_RESULT AllocationResult | 
|  1918       AllocateRawFixedArray(int length, PretenureFlag pretenure); |  2005       AllocateRawFixedArray(int length, PretenureFlag pretenure); | 
|  1919  |  2006  | 
|  1920   // Allocate an uninitialized fixed double array. |  2007   // Allocate an uninitialized fixed double array. | 
|  1921   MUST_USE_RESULT AllocationResult |  2008   MUST_USE_RESULT AllocationResult | 
|  1922       AllocateRawFixedDoubleArray(int length, PretenureFlag pretenure); |  2009       AllocateRawFixedDoubleArray(int length, PretenureFlag pretenure); | 
|  1923  |  2010  | 
|  1924   // Allocate an initialized fixed array with the given filler value. |  2011   // Allocate an initialized fixed array with the given filler value. | 
|  1925   MUST_USE_RESULT AllocationResult |  2012   MUST_USE_RESULT AllocationResult | 
|  1926       AllocateFixedArrayWithFiller(int length, PretenureFlag pretenure, |  2013       AllocateFixedArrayWithFiller(int length, PretenureFlag pretenure, | 
|  1927                                    Object* filler); |  2014                                    Object* filler); | 
|  1928  |  2015  | 
|  1929   // Allocates and partially initializes a String.  There are two String |  2016   // Allocates and partially initializes a String.  There are two String | 
|  1930   // encodings: one-byte and two-byte.  These functions allocate a string of |  2017   // encodings: one-byte and two-byte.  These functions allocate a string of | 
|  1931   // the given length and set its map and length fields.  The characters of |  2018   // the given length and set its map and length fields.  The characters of | 
|  1932   // the string are uninitialized. |  2019   // the string are uninitialized. | 
|  1933   MUST_USE_RESULT AllocationResult |  2020   MUST_USE_RESULT AllocationResult | 
|  1934       AllocateRawOneByteString(int length, PretenureFlag pretenure); |  2021       AllocateRawOneByteString(int length, PretenureFlag pretenure); | 
|  1935   MUST_USE_RESULT AllocationResult |  2022   MUST_USE_RESULT AllocationResult | 
|  1936       AllocateRawTwoByteString(int length, PretenureFlag pretenure); |  2023       AllocateRawTwoByteString(int length, PretenureFlag pretenure); | 
|  1937  |  2024  | 
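Editor's note: because the raw string allocators leave the character payload uninitialized, a caller has to fill it in afterwards. A hedged usage sketch (the helper name is hypothetical; SeqOneByteString, CopyChars and AllocationResult::To are used as they appear elsewhere in V8):

    // Sketch: allocate a one-byte string, then copy the characters in.
    AllocationResult MakeOneByteString(Heap* heap, Vector<const uint8_t> chars) {
      HeapObject* result = NULL;
      AllocationResult allocation =
          heap->AllocateRawOneByteString(chars.length(), NOT_TENURED);
      if (!allocation.To(&result)) return allocation;
      // Map and length are already set; only the characters are missing.
      CopyChars(SeqOneByteString::cast(result)->GetChars(), chars.start(),
                chars.length());
      return result;
    }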
|  1938   bool CreateInitialMaps(); |  | 
|  1939   void CreateInitialObjects(); |  | 
|  1940  |  | 
|  1941   // Allocates an internalized string in old space based on the character |  2025   // Allocates an internalized string in old space based on the character | 
|  1942   // stream. |  2026   // stream. | 
|  1943   MUST_USE_RESULT inline AllocationResult AllocateInternalizedStringFromUtf8( |  2027   MUST_USE_RESULT inline AllocationResult AllocateInternalizedStringFromUtf8( | 
|  1944       Vector<const char> str, int chars, uint32_t hash_field); |  2028       Vector<const char> str, int chars, uint32_t hash_field); | 
|  1945  |  2029  | 
|  1946   MUST_USE_RESULT inline AllocationResult AllocateOneByteInternalizedString( |  2030   MUST_USE_RESULT inline AllocationResult AllocateOneByteInternalizedString( | 
|  1947       Vector<const uint8_t> str, uint32_t hash_field); |  2031       Vector<const uint8_t> str, uint32_t hash_field); | 
|  1948  |  2032  | 
|  1949   MUST_USE_RESULT inline AllocationResult AllocateTwoByteInternalizedString( |  2033   MUST_USE_RESULT inline AllocationResult AllocateTwoByteInternalizedString( | 
|  1950       Vector<const uc16> str, uint32_t hash_field); |  2034       Vector<const uc16> str, uint32_t hash_field); | 
| (...skipping 46 matching lines...) |
|  1997   MUST_USE_RESULT AllocationResult CopyAndTenureFixedCOWArray(FixedArray* src); |  2081   MUST_USE_RESULT AllocationResult CopyAndTenureFixedCOWArray(FixedArray* src); | 
|  1998  |  2082  | 
|  1999   // Make a copy of src, set the map, and return the copy. |  2083   // Make a copy of src, set the map, and return the copy. | 
|  2000   MUST_USE_RESULT AllocationResult |  2084   MUST_USE_RESULT AllocationResult | 
|  2001       CopyFixedDoubleArrayWithMap(FixedDoubleArray* src, Map* map); |  2085       CopyFixedDoubleArrayWithMap(FixedDoubleArray* src, Map* map); | 
|  2002  |  2086  | 
|  2003   // Allocates a fixed double array with uninitialized values. |  2087   // Allocates a fixed double array with uninitialized values. | 
|  2004   MUST_USE_RESULT AllocationResult AllocateUninitializedFixedDoubleArray( |  2088   MUST_USE_RESULT AllocationResult AllocateUninitializedFixedDoubleArray( | 
|  2005       int length, PretenureFlag pretenure = NOT_TENURED); |  2089       int length, PretenureFlag pretenure = NOT_TENURED); | 
|  2006  |  2090  | 
|  2007   // These Create*EntryStub functions are here and forced to not be inlined |  | 
|  2008   // because of a gcc-4.4 bug that assigns wrong vtable entries. |  | 
|  2009   NO_INLINE(void CreateJSEntryStub()); |  | 
|  2010   NO_INLINE(void CreateJSConstructEntryStub()); |  | 
|  2011  |  | 
|  2012   void CreateFixedStubs(); |  | 
|  2013  |  | 
|  2014   // Allocate empty fixed array. |  2091   // Allocate empty fixed array. | 
|  2015   MUST_USE_RESULT AllocationResult AllocateEmptyFixedArray(); |  2092   MUST_USE_RESULT AllocationResult AllocateEmptyFixedArray(); | 
|  2016  |  2093  | 
|  2017   // Allocate empty fixed typed array of given type. |  2094   // Allocate empty fixed typed array of given type. | 
|  2018   MUST_USE_RESULT AllocationResult |  2095   MUST_USE_RESULT AllocationResult | 
|  2019       AllocateEmptyFixedTypedArray(ExternalArrayType array_type); |  2096       AllocateEmptyFixedTypedArray(ExternalArrayType array_type); | 
|  2020  |  2097  | 
|  2021   // Allocate a tenured simple cell. |  2098   // Allocate a tenured simple cell. | 
|  2022   MUST_USE_RESULT AllocationResult AllocateCell(Object* value); |  2099   MUST_USE_RESULT AllocationResult AllocateCell(Object* value); | 
|  2023  |  2100  | 
|  2024   // Allocate a tenured JS global property cell initialized with the hole. |  2101   // Allocate a tenured JS global property cell initialized with the hole. | 
|  2025   MUST_USE_RESULT AllocationResult AllocatePropertyCell(); |  2102   MUST_USE_RESULT AllocationResult AllocatePropertyCell(); | 
|  2026  |  2103  | 
|  2027   MUST_USE_RESULT AllocationResult AllocateWeakCell(HeapObject* value); |  2104   MUST_USE_RESULT AllocationResult AllocateWeakCell(HeapObject* value); | 
|  2028  |  2105  | 
|  2029   // Allocates a new utility object in the old generation. |  2106   // Allocates a new utility object in the old generation. | 
|  2030   MUST_USE_RESULT AllocationResult AllocateStruct(InstanceType type); |  2107   MUST_USE_RESULT AllocationResult AllocateStruct(InstanceType type); | 
|  2031  |  2108  | 
|  2032   // Allocates a new foreign object. |  2109   // Allocates a new foreign object. | 
|  2033   MUST_USE_RESULT AllocationResult |  2110   MUST_USE_RESULT AllocationResult | 
|  2034       AllocateForeign(Address address, PretenureFlag pretenure = NOT_TENURED); |  2111       AllocateForeign(Address address, PretenureFlag pretenure = NOT_TENURED); | 
|  2035  |  2112  | 
|  2036   MUST_USE_RESULT AllocationResult |  2113   MUST_USE_RESULT AllocationResult | 
|  2037       AllocateCode(int object_size, bool immovable); |  2114       AllocateCode(int object_size, bool immovable); | 
|  2038  |  2115  | 
|  2039   MUST_USE_RESULT AllocationResult InternalizeStringWithKey(HashTableKey* key); |  2116   MUST_USE_RESULT AllocationResult InternalizeStringWithKey(HashTableKey* key); | 
|  2040  |  2117  | 
|  2041   MUST_USE_RESULT AllocationResult InternalizeString(String* str); |  2118   MUST_USE_RESULT AllocationResult InternalizeString(String* str); | 
|  2042  |  2119  | 
|  2043   // Performs a minor collection in new generation. |  2120   // The amount of external memory registered through the API and kept | 
|  2044   void Scavenge(); |  2121   // alive by global handles. | 
 |  2122   int64_t amount_of_external_allocated_memory_; | 
|  2045  |  2123  | 
|  2046   // Commits from space if it is uncommitted. |  2124   // Caches the amount of external memory registered at the last global gc. | 
|  2047   void EnsureFromSpaceIsCommitted(); |  2125   int64_t amount_of_external_allocated_memory_at_last_global_gc_; | 
|  2048  |  2126  | 
|  2049   // Uncommit unused semi space. |  2127   // This can be calculated directly from a pointer to the heap; however, it is | 
|  2050   bool UncommitFromSpace() { return new_space_.UncommitFromSpace(); } |  2128   // more expedient to get at the isolate directly from within Heap methods. | 
 |  2129   Isolate* isolate_; | 
|  2051  |  2130  | 
|  2052   // Fill in bogus values in from space |  2131   Object* roots_[kRootListLength]; | 
|  2053   void ZapFromSpace(); |  | 
|  2054  |  2132  | 
|  2055   static String* UpdateNewSpaceReferenceInExternalStringTableEntry( |  2133   size_t code_range_size_; | 
|  2056       Heap* heap, Object** pointer); |  2134   int reserved_semispace_size_; | 
 |  2135   int max_semi_space_size_; | 
 |  2136   int initial_semispace_size_; | 
 |  2137   int target_semispace_size_; | 
 |  2138   intptr_t max_old_generation_size_; | 
 |  2139   intptr_t initial_old_generation_size_; | 
 |  2140   bool old_generation_size_configured_; | 
 |  2141   intptr_t max_executable_size_; | 
 |  2142   intptr_t maximum_committed_; | 
|  2057  |  2143  | 
|  2058   Address DoScavenge(ObjectVisitor* scavenge_visitor, Address new_space_front); |  2144   // For keeping track of how much data has survived | 
|  2059   static void ScavengeStoreBufferCallback(Heap* heap, MemoryChunk* page, |  2145   // scavenge since last new space expansion. | 
|  2060                                           StoreBufferEvent event); |  2146   int survived_since_last_expansion_; | 
|  2061  |  2147  | 
|  2062   // Performs a major collection in the whole heap. |  2148   // ... and since the last scavenge. | 
|  2063   void MarkCompact(); |  2149   int survived_last_scavenge_; | 
|  2064  |  2150  | 
|  2065   // Code to be run before and after mark-compact. |  2151   int always_allocate_scope_depth_; | 
|  2066   void MarkCompactPrologue(); |  | 
|  2067   void MarkCompactEpilogue(); |  | 
|  2068  |  2152  | 
|  2069   void ProcessNativeContexts(WeakObjectRetainer* retainer); |  2153   // For keeping track of context disposals. | 
|  2070   void ProcessAllocationSites(WeakObjectRetainer* retainer); |  2154   int contexts_disposed_; | 
|  2071  |  2155  | 
|  2072   // Deopts all code that contains allocation instructions which are tenured or |  2156   int global_ic_age_; | 
|  2073   // not tenured. Moreover, it clears the pretenuring allocation site statistics. |  | 
|  2074   void ResetAllAllocationSitesDependentCode(PretenureFlag flag); |  | 
|  2075  |  2157  | 
|  2076   // Evaluates local pretenuring for the old space and calls |  2158   int scan_on_scavenge_pages_; | 
|  2077   // ResetAllTenuredAllocationSitesDependentCode if too many objects died in |  | 
|  2078   // the old space. |  | 
|  2079   void EvaluateOldSpaceLocalPretenuring(uint64_t size_of_objects_before_gc); |  | 
|  2080  |  2159  | 
|  2081   // Called on heap tear-down. Frees all remaining ArrayBuffer backing stores. |  2160   NewSpace new_space_; | 
|  2082   void TearDownArrayBuffers(); |  2161   OldSpace* old_space_; | 
 |  2162   OldSpace* code_space_; | 
 |  2163   MapSpace* map_space_; | 
 |  2164   LargeObjectSpace* lo_space_; | 
 |  2165   HeapState gc_state_; | 
 |  2166   int gc_post_processing_depth_; | 
 |  2167   Address new_space_top_after_last_gc_; | 
|  2083  |  2168  | 
|  2084   // These correspond to the non-Helper versions. |  2169   // Returns the amount of external memory registered since last global gc. | 
|  2085   void RegisterNewArrayBufferHelper(std::map<void*, size_t>& live_buffers, |  2170   int64_t PromotedExternalMemorySize(); | 
|  2086                                     void* data, size_t length); |  | 
|  2087   void UnregisterArrayBufferHelper( |  | 
|  2088       std::map<void*, size_t>& live_buffers, |  | 
|  2089       std::map<void*, size_t>& not_yet_discovered_buffers, void* data); |  | 
|  2090   void RegisterLiveArrayBufferHelper( |  | 
|  2091       std::map<void*, size_t>& not_yet_discovered_buffers, void* data); |  | 
|  2092   size_t FreeDeadArrayBuffersHelper( |  | 
|  2093       Isolate* isolate, std::map<void*, size_t>& live_buffers, |  | 
|  2094       std::map<void*, size_t>& not_yet_discovered_buffers); |  | 
|  2095   void TearDownArrayBuffersHelper( |  | 
|  2096       Isolate* isolate, std::map<void*, size_t>& live_buffers, |  | 
|  2097       std::map<void*, size_t>& not_yet_discovered_buffers); |  | 
|  2098  |  2171  | 
|  2099   // Record statistics before and after garbage collection. |  2172   // How many "runtime allocations" happened. | 
|  2100   void ReportStatisticsBeforeGC(); |  2173   uint32_t allocations_count_; | 
|  2101   void ReportStatisticsAfterGC(); |  2174  | 
 |  2175   // Running hash over allocations performed. | 
 |  2176   uint32_t raw_allocations_hash_; | 
 |  2177  | 
 |  2178   // Countdown counter, dumps allocation hash when 0. | 
 |  2179   uint32_t dump_allocations_hash_countdown_; | 
 |  2180  | 
 |  2181   // How many mark-sweep collections happened. | 
 |  2182   unsigned int ms_count_; | 
 |  2183  | 
 |  2184   // How many gc happened. | 
 |  2185   unsigned int gc_count_; | 
 |  2186  | 
 |  2187   // For post mortem debugging. | 
 |  2188   int remembered_unmapped_pages_index_; | 
 |  2189   Address remembered_unmapped_pages_[kRememberedUnmappedPages]; | 
 |  2190  | 
 |  2191 #ifdef DEBUG | 
 |  2192   // If the --gc-interval flag is set to a positive value, this | 
 |  2193   // variable holds the number of allocations remaining until the | 
 |  2194   // next failure and garbage collection. | 
 |  2195   int allocation_timeout_; | 
 |  2196 #endif  // DEBUG | 
 |  2197  | 
 |  2198   // Limit that triggers a global GC on the next (normally caused) GC.  This | 
 |  2199   // is checked when we have already decided to do a GC to help determine | 
 |  2200   // which collector to invoke, before expanding a paged space in the old | 
 |  2201   // generation and on every allocation in large object space. | 
 |  2202   intptr_t old_generation_allocation_limit_; | 
 |  2203  | 
 |  2204   // Indicates that an allocation has failed in the old generation since the | 
 |  2205   // last GC. | 
 |  2206   bool old_gen_exhausted_; | 
 |  2207  | 
 |  2208   // Indicates that memory usage is more important than latency. | 
 |  2209   // TODO(ulan): Merge it with memory reducer once chromium:490559 is fixed. | 
 |  2210   bool optimize_for_memory_usage_; | 
 |  2211  | 
 |  2212   // Indicates that inline bump-pointer allocation has been globally disabled | 
 |  2213   // for all spaces. This is used to disable allocations in generated code. | 
 |  2214   bool inline_allocation_disabled_; | 
 |  2215  | 
 |  2216   // Weak list heads, threaded through the objects. | 
 |  2217   // List heads are initialized lazily and contain the undefined_value at start. | 
 |  2218   Object* native_contexts_list_; | 
 |  2219   Object* allocation_sites_list_; | 
 |  2220  | 
 |  2221   // List of encountered weak collections (JSWeakMap and JSWeakSet) during | 
 |  2222   // marking. It is initialized during marking, destroyed after marking and | 
 |  2223   // contains Smi(0) while marking is not active. | 
 |  2224   Object* encountered_weak_collections_; | 
 |  2225  | 
 |  2226   Object* encountered_weak_cells_; | 
 |  2227  | 
 |  2228   StoreBufferRebuilder store_buffer_rebuilder_; | 
 |  2229  | 
 |  2230   List<GCCallbackPair> gc_epilogue_callbacks_; | 
 |  2231   List<GCCallbackPair> gc_prologue_callbacks_; | 
|  2102  |  2232  | 
|  2103   // Total RegExp code ever generated |  2233   // Total RegExp code ever generated | 
|  2104   double total_regexp_code_generated_; |  2234   double total_regexp_code_generated_; | 
|  2105  |  2235  | 
|  2106   int deferred_counters_[v8::Isolate::kUseCounterFeatureCount]; |  2236   int deferred_counters_[v8::Isolate::kUseCounterFeatureCount]; | 
|  2107  |  2237  | 
|  2108   GCTracer* tracer_; |  2238   GCTracer* tracer_; | 
|  2109  |  2239  | 
|  2110   // Creates and installs the full-sized number string cache. |  | 
|  2111   int FullSizeNumberStringCacheLength(); |  | 
|  2112   // Flush the number to string cache. |  | 
|  2113   void FlushNumberStringCache(); |  | 
|  2114  |  | 
|  2115   // Sets used allocation sites entries to undefined. |  | 
|  2116   void FlushAllocationSitesScratchpad(); |  | 
|  2117  |  | 
|  2118   // Initializes the allocation sites scratchpad with undefined values. |  | 
|  2119   void InitializeAllocationSitesScratchpad(); |  | 
|  2120  |  | 
|  2121   // Adds an allocation site to the scratchpad if there is space left. |  | 
|  2122   void AddAllocationSiteToScratchpad(AllocationSite* site, |  | 
|  2123                                      ScratchpadSlotMode mode); |  | 
|  2124  |  | 
|  2125   void UpdateSurvivalStatistics(int start_new_space_size); |  | 
|  2126  |  | 
|  2127   static const int kYoungSurvivalRateHighThreshold = 90; |  | 
|  2128   static const int kYoungSurvivalRateAllowedDeviation = 15; |  | 
|  2129  |  | 
|  2130   static const int kOldSurvivalRateLowThreshold = 10; |  | 
|  2131  |  | 
|  2132   int high_survival_rate_period_length_; |  2240   int high_survival_rate_period_length_; | 
|  2133   intptr_t promoted_objects_size_; |  2241   intptr_t promoted_objects_size_; | 
|  2134   double promotion_ratio_; |  2242   double promotion_ratio_; | 
|  2135   double promotion_rate_; |  2243   double promotion_rate_; | 
|  2136   intptr_t semi_space_copied_object_size_; |  2244   intptr_t semi_space_copied_object_size_; | 
|  2137   intptr_t previous_semi_space_copied_object_size_; |  2245   intptr_t previous_semi_space_copied_object_size_; | 
|  2138   double semi_space_copied_rate_; |  2246   double semi_space_copied_rate_; | 
|  2139   int nodes_died_in_new_space_; |  2247   int nodes_died_in_new_space_; | 
|  2140   int nodes_copied_in_new_space_; |  2248   int nodes_copied_in_new_space_; | 
|  2141   int nodes_promoted_; |  2249   int nodes_promoted_; | 
|  2142  |  2250  | 
|  2143   // This is the pretenuring trigger for allocation sites that are in maybe |  2251   // This is the pretenuring trigger for allocation sites that are in maybe | 
|  2144   // tenure state. When we switch to the maximum new space size we deoptimize |  2252   // tenure state. When we switch to the maximum new space size we deoptimize | 
|  2145   // the code that belongs to the allocation site and derive the lifetime |  2253   // the code that belongs to the allocation site and derive the lifetime | 
|  2146   // of the allocation site. |  2254   // of the allocation site. | 
|  2147   unsigned int maximum_size_scavenges_; |  2255   unsigned int maximum_size_scavenges_; | 
|  2148  |  2256  | 
|  2149   // TODO(hpayer): Allocation site pretenuring may make this method obsolete. |  | 
|  2150   // Re-visit incremental marking heuristics. |  | 
|  2151   bool IsHighSurvivalRate() { return high_survival_rate_period_length_ > 0; } |  | 
|  2152  |  | 
|  2153   void ConfigureInitialOldGenerationSize(); |  | 
|  2154  |  | 
|  2155   void SelectScavengingVisitorsTable(); |  | 
|  2156  |  | 
|  2157   bool HasLowYoungGenerationAllocationRate(); |  | 
|  2158   bool HasLowOldGenerationAllocationRate(); |  | 
|  2159   double YoungGenerationMutatorUtilization(); |  | 
|  2160   double OldGenerationMutatorUtilization(); |  | 
|  2161  |  | 
|  2162   void ReduceNewSpaceSize(); |  | 
|  2163  |  | 
|  2164   bool TryFinalizeIdleIncrementalMarking( |  | 
|  2165       double idle_time_in_ms, size_t size_of_objects, |  | 
|  2166       size_t mark_compact_speed_in_bytes_per_ms); |  | 
|  2167  |  | 
|  2168   GCIdleTimeHandler::HeapState ComputeHeapState(); |  | 
|  2169  |  | 
|  2170   bool PerformIdleTimeAction(GCIdleTimeAction action, |  | 
|  2171                              GCIdleTimeHandler::HeapState heap_state, |  | 
|  2172                              double deadline_in_ms); |  | 
|  2173  |  | 
|  2174   void IdleNotificationEpilogue(GCIdleTimeAction action, |  | 
|  2175                                 GCIdleTimeHandler::HeapState heap_state, |  | 
|  2176                                 double start_ms, double deadline_in_ms); |  | 
|  2177   void CheckAndNotifyBackgroundIdleNotification(double idle_time_in_ms, |  | 
|  2178                                                 double now_ms); |  | 
|  2179  |  | 
|  2180   void ClearObjectStats(bool clear_last_time_stats = false); |  | 
|  2181  |  | 
|  2182   inline void UpdateAllocationsHash(HeapObject* object); |  | 
|  2183   inline void UpdateAllocationsHash(uint32_t value); |  | 
|  2184   inline void PrintAllocationsHash(); |  | 
|  2185  |  | 
|  2186   void AddToRingBuffer(const char* string); |  | 
|  2187   void GetFromRingBuffer(char* buffer); |  | 
|  2188  |  | 
|  2189   // Object counts and used memory by InstanceType |  2257   // Object counts and used memory by InstanceType | 
|  2190   size_t object_counts_[OBJECT_STATS_COUNT]; |  2258   size_t object_counts_[OBJECT_STATS_COUNT]; | 
|  2191   size_t object_counts_last_time_[OBJECT_STATS_COUNT]; |  2259   size_t object_counts_last_time_[OBJECT_STATS_COUNT]; | 
|  2192   size_t object_sizes_[OBJECT_STATS_COUNT]; |  2260   size_t object_sizes_[OBJECT_STATS_COUNT]; | 
|  2193   size_t object_sizes_last_time_[OBJECT_STATS_COUNT]; |  2261   size_t object_sizes_last_time_[OBJECT_STATS_COUNT]; | 
|  2194  |  2262  | 
|  2195   // Maximum GC pause. |  2263   // Maximum GC pause. | 
|  2196   double max_gc_pause_; |  2264   double max_gc_pause_; | 
|  2197  |  2265  | 
|  2198   // Total time spent in GC. |  2266   // Total time spent in GC. | 
| (...skipping 42 matching lines...) |
|  2241   size_t old_generation_allocation_counter_; |  2309   size_t old_generation_allocation_counter_; | 
|  2242  |  2310  | 
|  2243   // The size of objects in old generation after the last MarkCompact GC. |  2311   // The size of objects in old generation after the last MarkCompact GC. | 
|  2244   size_t old_generation_size_at_last_gc_; |  2312   size_t old_generation_size_at_last_gc_; | 
|  2245  |  2313  | 
|  2246   // If the --deopt_every_n_garbage_collections flag is set to a positive value, |  2314   // If the --deopt_every_n_garbage_collections flag is set to a positive value, | 
|  2247   // this variable holds the number of garbage collections since the last |  2315   // this variable holds the number of garbage collections since the last | 
|  2248   // deoptimization triggered by garbage collection. |  2316   // deoptimization triggered by garbage collection. | 
|  2249   int gcs_since_last_deopt_; |  2317   int gcs_since_last_deopt_; | 
|  2250  |  2318  | 
|  2251   static const int kAllocationSiteScratchpadSize = 256; |  | 
|  2252   int allocation_sites_scratchpad_length_; |  2319   int allocation_sites_scratchpad_length_; | 
|  2253  |  2320  | 
|  2254   char trace_ring_buffer_[kTraceRingBufferSize]; |  2321   char trace_ring_buffer_[kTraceRingBufferSize]; | 
|  2255   // If it's not full then the data is from 0 to ring_buffer_end_.  If it's |  2322   // If it's not full then the data is from 0 to ring_buffer_end_.  If it's | 
|  2256   // full then the data is from ring_buffer_end_ to the end of the buffer and |  2323   // full then the data is from ring_buffer_end_ to the end of the buffer and | 
|  2257   // from 0 to ring_buffer_end_. |  2324   // from 0 to ring_buffer_end_. | 
|  2258   bool ring_buffer_full_; |  2325   bool ring_buffer_full_; | 
|  2259   size_t ring_buffer_end_; |  2326   size_t ring_buffer_end_; | 
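Editor's note: given the wrap-around layout described above, GetFromRingBuffer presumably linearizes the buffer into the caller's storage. A sketch of that behavior (an assumption, not the heap.cc implementation; memcpy requires <cstring>):

    // Sketch: when the buffer has wrapped, copy the older tail segment first.
    void Heap::GetFromRingBuffer(char* buffer) {
      size_t copied = 0;
      if (ring_buffer_full_) {
        copied = kTraceRingBufferSize - ring_buffer_end_;
        memcpy(buffer, trace_ring_buffer_ + ring_buffer_end_, copied);
      }
      memcpy(buffer + copied, trace_ring_buffer_, ring_buffer_end_);
    }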
|  2260  |  2327  | 
|  2261   static const int kMaxMarkCompactsInIdleRound = 7; |  | 
|  2262   static const int kIdleScavengeThreshold = 5; |  | 
|  2263  |  | 
|  2264   // Shared state read by the scavenge collector and set by ScavengeObject. |  2328   // Shared state read by the scavenge collector and set by ScavengeObject. | 
|  2265   PromotionQueue promotion_queue_; |  2329   PromotionQueue promotion_queue_; | 
|  2266  |  2330  | 
|  2267   // Flag is set when the heap has been configured.  The heap can be repeatedly |  2331   // Flag is set when the heap has been configured.  The heap can be repeatedly | 
|  2268   // configured through the API until it is set up. |  2332   // configured through the API until it is set up. | 
|  2269   bool configured_; |  2333   bool configured_; | 
|  2270  |  2334  | 
|  2271   // Currently set GC flags that are respected by all GC components. |  2335   // Currently set GC flags that are respected by all GC components. | 
|  2272   int current_gc_flags_; |  2336   int current_gc_flags_; | 
|  2273  |  2337  | 
| (...skipping 23 matching lines...) |
|  2297  |  2361  | 
|  2298   // To be able to free memory held by ArrayBuffers during scavenge as well, we |  2362   // To be able to free memory held by ArrayBuffers during scavenge as well, we | 
|  2299   // have a separate list of allocated memory held by ArrayBuffers in new space. |  2363   // have a separate list of allocated memory held by ArrayBuffers in new space. | 
|  2300   // |  2364   // | 
|  2301   // Since mark/compact also evacuates the new space, all pointers in the |  2365   // Since mark/compact also evacuates the new space, all pointers in the | 
|  2302   // |live_array_buffers_for_scavenge_| list are also in the |  2366   // |live_array_buffers_for_scavenge_| list are also in the | 
|  2303   // |live_array_buffers_| list. |  2367   // |live_array_buffers_| list. | 
|  2304   std::map<void*, size_t> live_array_buffers_for_scavenge_; |  2368   std::map<void*, size_t> live_array_buffers_for_scavenge_; | 
|  2305   std::map<void*, size_t> not_yet_discovered_array_buffers_for_scavenge_; |  2369   std::map<void*, size_t> not_yet_discovered_array_buffers_for_scavenge_; | 
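Editor's note: these maps drive the Register/Unregister/FreeDead helpers declared earlier in this section. A hedged sketch of the intended bookkeeping (the bodies are assumptions; the real code also releases the embedder-owned backing memory):

    // Sketch: a newly registered backing store is 'live'; after a GC, anything
    // still in the not-yet-discovered map was never re-registered and is dead.
    void Heap::RegisterNewArrayBufferHelper(std::map<void*, size_t>& live_buffers,
                                            void* data, size_t length) {
      live_buffers[data] = length;
    }

    size_t Heap::FreeDeadArrayBuffersHelper(
        Isolate* isolate, std::map<void*, size_t>& live_buffers,
        std::map<void*, size_t>& not_yet_discovered_buffers) {
      size_t freed_bytes = 0;
      for (std::map<void*, size_t>::iterator it = not_yet_discovered_buffers.begin();
           it != not_yet_discovered_buffers.end(); ++it) {
        freed_bytes += it->second;
        live_buffers.erase(it->first);
        // The actual implementation would also free the backing store here.
      }
      not_yet_discovered_buffers.clear();
      return freed_bytes;
    }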
|  2306  |  2370  | 
|  2307   struct StrongRootsList; |  | 
|  2308   StrongRootsList* strong_roots_list_; |  2371   StrongRootsList* strong_roots_list_; | 
|  2309  |  2372  | 
|  2310   friend class AlwaysAllocateScope; |  2373   friend class AlwaysAllocateScope; | 
|  2311   friend class Bootstrapper; |  2374   friend class Bootstrapper; | 
|  2312   friend class Deserializer; |  2375   friend class Deserializer; | 
|  2313   friend class Factory; |  2376   friend class Factory; | 
|  2314   friend class GCCallbacksScope; |  2377   friend class GCCallbacksScope; | 
|  2315   friend class GCTracer; |  2378   friend class GCTracer; | 
|  2316   friend class HeapIterator; |  2379   friend class HeapIterator; | 
|  2317   friend class IncrementalMarking; |  2380   friend class IncrementalMarking; | 
| (...skipping 369 matching lines...) |
|  2687   DisallowHeapAllocation no_allocation;  // i.e. no gc allowed. |  2750   DisallowHeapAllocation no_allocation;  // i.e. no gc allowed. | 
|  2688  |  2751  | 
|  2689  private: |  2752  private: | 
|  2690   DISALLOW_IMPLICIT_CONSTRUCTORS(PathTracer); |  2753   DISALLOW_IMPLICIT_CONSTRUCTORS(PathTracer); | 
|  2691 }; |  2754 }; | 
|  2692 #endif  // DEBUG |  2755 #endif  // DEBUG | 
|  2693 } |  2756 } | 
|  2694 }  // namespace v8::internal |  2757 }  // namespace v8::internal | 
|  2695  |  2758  | 
|  2696 #endif  // V8_HEAP_HEAP_H_ |  2759 #endif  // V8_HEAP_HEAP_H_ | 