| OLD | NEW |
| 1 // Copyright 2012 the V8 project authors. All rights reserved. | 1 // Copyright 2012 the V8 project authors. All rights reserved. |
| 2 // Use of this source code is governed by a BSD-style license that can be | 2 // Use of this source code is governed by a BSD-style license that can be |
| 3 // found in the LICENSE file. | 3 // found in the LICENSE file. |
| 4 | 4 |
| 5 #ifndef V8_HEAP_HEAP_H_ | 5 #ifndef V8_HEAP_HEAP_H_ |
| 6 #define V8_HEAP_HEAP_H_ | 6 #define V8_HEAP_HEAP_H_ |
| 7 | 7 |
| 8 #include <cmath> | 8 #include <cmath> |
| 9 #include <map> | 9 #include <map> |
| 10 | 10 |
| (...skipping 406 matching lines...) |
| 417 V(ForeignMap) \ | 417 V(ForeignMap) \ |
| 418 V(NeanderMap) \ | 418 V(NeanderMap) \ |
| 419 V(empty_string) \ | 419 V(empty_string) \ |
| 420 PRIVATE_SYMBOL_LIST(V) | 420 PRIVATE_SYMBOL_LIST(V) |
| 421 | 421 |
| 422 // Forward declarations. | 422 // Forward declarations. |
| 423 class HeapObjectsFilter; | 423 class HeapObjectsFilter; |
| 424 class HeapStats; | 424 class HeapStats; |
| 425 class Isolate; | 425 class Isolate; |
| 426 class MemoryReducer; | 426 class MemoryReducer; |
| 427 class ObjectStats; |
| 427 class WeakObjectRetainer; | 428 class WeakObjectRetainer; |
| 428 | 429 |
| 429 | 430 |
| 430 // A queue of objects promoted during scavenge. Each object is accompanied | 431 // A queue of objects promoted during scavenge. Each object is accompanied |
| 431 // by its size to avoid dereferencing a map pointer for scanning. | 432 // by its size to avoid dereferencing a map pointer for scanning. |
| 432 // The last page in to-space is used for the promotion queue. On conflict | 433 // The last page in to-space is used for the promotion queue. On conflict |
| 433 // during scavenge, the promotion queue is allocated externally and all | 434 // during scavenge, the promotion queue is allocated externally and all |
| 434 // entries are copied to the external queue. | 435 // entries are copied to the external queue. |
| 435 class PromotionQueue { | 436 class PromotionQueue { |
| 436 public: | 437 public: |
| (...skipping 142 matching lines...) |
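The comment above describes the promotion queue as a list of (object, size) entries kept in the last to-space page, spilling to an external buffer on conflict. Below is a minimal standalone sketch of that idea, not V8's actual implementation; all names (SimplePromotionQueue, Entry, in_place_capacity) are illustrative.

```cpp
// Minimal sketch of the promotion-queue idea described above; types and names
// are placeholders, not V8's real implementation.
#include <cstddef>
#include <vector>

class SimplePromotionQueue {
 public:
  // Each entry carries the object pointer and its size, so the scavenger does
  // not have to dereference the map again to know how many bytes to scan.
  struct Entry {
    void* object;
    size_t size;
  };

  explicit SimplePromotionQueue(size_t in_place_capacity)
      : in_place_capacity_(in_place_capacity) {}

  void insert(void* object, size_t size) {
    if (!overflowed_ && in_place_.size() == in_place_capacity_) {
      // "On conflict" (the reserved in-place area is exhausted) all entries
      // are copied to an external buffer and new entries go there as well.
      external_.assign(in_place_.begin(), in_place_.end());
      in_place_.clear();
      overflowed_ = true;
    }
    (overflowed_ ? external_ : in_place_).push_back({object, size});
  }

  bool remove(Entry* out) {
    std::vector<Entry>& active = overflowed_ ? external_ : in_place_;
    if (active.empty()) return false;
    *out = active.back();
    active.pop_back();
    return true;
  }

 private:
  size_t in_place_capacity_;  // stands in for "the last page in to-space"
  bool overflowed_ = false;
  std::vector<Entry> in_place_;
  std::vector<Entry> external_;
};
```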
| 579 // Indicates whether live bytes adjustment is triggered | 580 // Indicates whether live bytes adjustment is triggered |
| 580 // - from within the GC code before sweeping started (SEQUENTIAL_TO_SWEEPER), | 581 // - from within the GC code before sweeping started (SEQUENTIAL_TO_SWEEPER), |
| 581 // - or from within GC (CONCURRENT_TO_SWEEPER), | 582 // - or from within GC (CONCURRENT_TO_SWEEPER), |
| 582 // - or mutator code (CONCURRENT_TO_SWEEPER). | 583 // - or mutator code (CONCURRENT_TO_SWEEPER). |
| 583 enum InvocationMode { SEQUENTIAL_TO_SWEEPER, CONCURRENT_TO_SWEEPER }; | 584 enum InvocationMode { SEQUENTIAL_TO_SWEEPER, CONCURRENT_TO_SWEEPER }; |
| 584 | 585 |
| 585 enum ScratchpadSlotMode { IGNORE_SCRATCHPAD_SLOT, RECORD_SCRATCHPAD_SLOT }; | 586 enum ScratchpadSlotMode { IGNORE_SCRATCHPAD_SLOT, RECORD_SCRATCHPAD_SLOT }; |
| 586 | 587 |
| 587 enum HeapState { NOT_IN_GC, SCAVENGE, MARK_COMPACT }; | 588 enum HeapState { NOT_IN_GC, SCAVENGE, MARK_COMPACT }; |
| 588 | 589 |
| 589 // ObjectStats are kept in two arrays, counts and sizes. Related stats are | |
| 590 // stored in a contiguous linear buffer. Stats groups are stored one after | |
| 591 // another. | |
| 592 enum { | |
| 593 FIRST_CODE_KIND_SUB_TYPE = LAST_TYPE + 1, | |
| 594 FIRST_FIXED_ARRAY_SUB_TYPE = | |
| 595 FIRST_CODE_KIND_SUB_TYPE + Code::NUMBER_OF_KINDS, | |
| 596 FIRST_CODE_AGE_SUB_TYPE = | |
| 597 FIRST_FIXED_ARRAY_SUB_TYPE + LAST_FIXED_ARRAY_SUB_TYPE + 1, | |
| 598 OBJECT_STATS_COUNT = FIRST_CODE_AGE_SUB_TYPE + Code::kCodeAgeCount + 1 | |
| 599 }; | |
| 600 | |
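The enum removed here encodes how the flat counts/sizes arrays are partitioned: plain instance types first, then code-kind sub-types, then fixed-array sub-types, then code-age sub-types. The standalone sketch below reproduces that index arithmetic with placeholder constants (the real values come from LAST_TYPE, Code::NUMBER_OF_KINDS, LAST_FIXED_ARRAY_SUB_TYPE and Code::kCodeAgeCount), purely to illustrate the layout the comment describes.

```cpp
// Illustration of the linear bucket layout the removed enum encodes. The
// constants are placeholders; in V8 they come from InstanceType, Code::Kind,
// and the fixed-array sub-type list.
#include <cstdio>

constexpr int kLastInstanceType = 200;      // stands in for LAST_TYPE
constexpr int kNumberOfCodeKinds = 16;      // stands in for Code::NUMBER_OF_KINDS
constexpr int kLastFixedArraySubType = 25;  // stands in for LAST_FIXED_ARRAY_SUB_TYPE
constexpr int kCodeAgeCount = 6;            // stands in for Code::kCodeAgeCount

constexpr int kFirstCodeKindSubType = kLastInstanceType + 1;
constexpr int kFirstFixedArraySubType = kFirstCodeKindSubType + kNumberOfCodeKinds;
constexpr int kFirstCodeAgeSubType =
    kFirstFixedArraySubType + kLastFixedArraySubType + 1;
constexpr int kObjectStatsCount = kFirstCodeAgeSubType + kCodeAgeCount + 1;

int main() {
  // One pair of arrays of length kObjectStatsCount holds all stats groups,
  // one after another, exactly as the comment above says.
  std::printf("instance types:       [0, %d]\n", kLastInstanceType);
  std::printf("code kinds:           [%d, %d)\n", kFirstCodeKindSubType,
              kFirstFixedArraySubType);
  std::printf("fixed array subtypes: [%d, %d)\n", kFirstFixedArraySubType,
              kFirstCodeAgeSubType);
  std::printf("code ages:            [%d, %d)\n", kFirstCodeAgeSubType,
              kObjectStatsCount);
  return 0;
}
```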
| 601 // Taking this lock prevents the GC from entering a phase that relocates | 590 // Taking this lock prevents the GC from entering a phase that relocates |
| 602 // object references. | 591 // object references. |
| 603 class RelocationLock { | 592 class RelocationLock { |
| 604 public: | 593 public: |
| 605 explicit RelocationLock(Heap* heap) : heap_(heap) { | 594 explicit RelocationLock(Heap* heap) : heap_(heap) { |
| 606 heap_->relocation_mutex_.Lock(); | 595 heap_->relocation_mutex_.Lock(); |
| 607 } | 596 } |
| 608 | 597 |
| 609 ~RelocationLock() { heap_->relocation_mutex_.Unlock(); } | 598 ~RelocationLock() { heap_->relocation_mutex_.Unlock(); } |
| 610 | 599 |
| (...skipping 291 matching lines...) |
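RelocationLock is a scoped RAII guard: the constructor takes heap_->relocation_mutex_ and the destructor releases it, so the GC cannot enter a relocating phase while the guard is alive. A hypothetical usage sketch follows; it assumes the surrounding V8 headers, and TouchRawObjectPointers is a made-up caller.

```cpp
// Hypothetical usage of the RAII guard shown above; `heap` is assumed to be a
// valid Heap* obtained from the current isolate.
void TouchRawObjectPointers(Heap* heap) {
  Heap::RelocationLock relocation_lock(heap);  // blocks relocating GC phases
  // ... it is now safe to hold raw object addresses across this scope; the
  // mutex is released automatically when relocation_lock is destroyed.
}
```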
| 902 // Number of "runtime allocations" done so far. | 891 // Number of "runtime allocations" done so far. |
| 903 uint32_t allocations_count() { return allocations_count_; } | 892 uint32_t allocations_count() { return allocations_count_; } |
| 904 | 893 |
| 905 // Returns deterministic "time" value in ms. Works only with | 894 // Returns deterministic "time" value in ms. Works only with |
| 906 // FLAG_verify_predictable. | 895 // FLAG_verify_predictable. |
| 907 double synthetic_time() { return allocations_count() / 2.0; } | 896 double synthetic_time() { return allocations_count() / 2.0; } |
| 908 | 897 |
| 909 // Print short heap statistics. | 898 // Print short heap statistics. |
| 910 void PrintShortHeapStatistics(); | 899 void PrintShortHeapStatistics(); |
| 911 | 900 |
| 912 size_t object_count_last_gc(size_t index) { | |
| 913 return index < OBJECT_STATS_COUNT ? object_counts_last_time_[index] : 0; | |
| 914 } | |
| 915 | |
| 916 size_t object_size_last_gc(size_t index) { | |
| 917 return index < OBJECT_STATS_COUNT ? object_sizes_last_time_[index] : 0; | |
| 918 } | |
| 919 | |
| 920 inline HeapState gc_state() { return gc_state_; } | 901 inline HeapState gc_state() { return gc_state_; } |
| 921 | 902 |
| 922 inline bool IsInGCPostProcessing() { return gc_post_processing_depth_ > 0; } | 903 inline bool IsInGCPostProcessing() { return gc_post_processing_depth_ > 0; } |
| 923 | 904 |
| 924 // If an object has an AllocationMemento trailing it, return it, otherwise | 905 // If an object has an AllocationMemento trailing it, return it, otherwise |
| 925 // return NULL; | 906 // return NULL; |
| 926 inline AllocationMemento* FindAllocationMemento(HeapObject* object); | 907 inline AllocationMemento* FindAllocationMemento(HeapObject* object); |
| 927 | 908 |
| 928 // Returns false if not able to reserve. | 909 // Returns false if not able to reserve. |
| 929 bool ReserveSpace(Reservation* reservations); | 910 bool ReserveSpace(Reservation* reservations); |
| (...skipping 72 matching lines...) |
| 1002 int64_t amount_of_external_allocated_memory() { | 983 int64_t amount_of_external_allocated_memory() { |
| 1003 return amount_of_external_allocated_memory_; | 984 return amount_of_external_allocated_memory_; |
| 1004 } | 985 } |
| 1005 | 986 |
| 1006 void DeoptMarkedAllocationSites(); | 987 void DeoptMarkedAllocationSites(); |
| 1007 | 988 |
| 1008 bool DeoptMaybeTenuredAllocationSites() { | 989 bool DeoptMaybeTenuredAllocationSites() { |
| 1009 return new_space_.IsAtMaximumCapacity() && maximum_size_scavenges_ == 0; | 990 return new_space_.IsAtMaximumCapacity() && maximum_size_scavenges_ == 0; |
| 1010 } | 991 } |
| 1011 | 992 |
| 1012 void RecordObjectStats(InstanceType type, size_t size) { | |
| 1013 DCHECK(type <= LAST_TYPE); | |
| 1014 object_counts_[type]++; | |
| 1015 object_sizes_[type] += size; | |
| 1016 } | |
| 1017 | |
| 1018 void RecordCodeSubTypeStats(int code_sub_type, int code_age, size_t size) { | |
| 1019 int code_sub_type_index = FIRST_CODE_KIND_SUB_TYPE + code_sub_type; | |
| 1020 int code_age_index = | |
| 1021 FIRST_CODE_AGE_SUB_TYPE + code_age - Code::kFirstCodeAge; | |
| 1022 DCHECK(code_sub_type_index >= FIRST_CODE_KIND_SUB_TYPE && | |
| 1023 code_sub_type_index < FIRST_CODE_AGE_SUB_TYPE); | |
| 1024 DCHECK(code_age_index >= FIRST_CODE_AGE_SUB_TYPE && | |
| 1025 code_age_index < OBJECT_STATS_COUNT); | |
| 1026 object_counts_[code_sub_type_index]++; | |
| 1027 object_sizes_[code_sub_type_index] += size; | |
| 1028 object_counts_[code_age_index]++; | |
| 1029 object_sizes_[code_age_index] += size; | |
| 1030 } | |
| 1031 | |
| 1032 void RecordFixedArraySubTypeStats(int array_sub_type, size_t size) { | |
| 1033 DCHECK(array_sub_type <= LAST_FIXED_ARRAY_SUB_TYPE); | |
| 1034 object_counts_[FIRST_FIXED_ARRAY_SUB_TYPE + array_sub_type]++; | |
| 1035 object_sizes_[FIRST_FIXED_ARRAY_SUB_TYPE + array_sub_type] += size; | |
| 1036 } | |
| 1037 | |
| 1038 void TraceObjectStats(); | |
| 1039 void TraceObjectStat(const char* name, int count, int size, double time); | |
| 1040 void CheckpointObjectStats(); | |
| 1041 bool GetObjectTypeName(size_t index, const char** object_type, | |
| 1042 const char** object_sub_type); | |
| 1043 | |
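The recording helpers deleted here (RecordObjectStats, RecordCodeSubTypeStats, RecordFixedArraySubTypeStats, the Trace*/Checkpoint entry points) presumably move into the new ObjectStats class that is forward-declared above and stored as object_stats_ further down. The sketch below is only an inference from the removed members, not the interface actually added elsewhere in this CL.

```cpp
// Speculative shape of the extracted class, inferred from the members removed
// from Heap in this hunk; the real definition lives in the CL's new
// object-stats files and may differ.
class ObjectStats {
 public:
  explicit ObjectStats(Heap* heap) : heap_(heap) {}

  // Counterpart of the removed Heap::RecordObjectStats().
  void RecordObjectStats(InstanceType type, size_t size) {
    object_counts_[type]++;
    object_sizes_[type] += size;
  }

  // Counterpart of the removed Heap::CheckpointObjectStats(): snapshots the
  // running counters so they can be reported as "at last GC".
  void CheckpointObjectStats();

 private:
  Heap* heap_;
  // OBJECT_STATS_COUNT would move here together with the removed bucket enum.
  size_t object_counts_[OBJECT_STATS_COUNT];
  size_t object_sizes_[OBJECT_STATS_COUNT];
};
```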
| 1044 void AddWeakObjectToCodeDependency(Handle<HeapObject> obj, | 993 void AddWeakObjectToCodeDependency(Handle<HeapObject> obj, |
| 1045 Handle<DependentCode> dep); | 994 Handle<DependentCode> dep); |
| 1046 | 995 |
| 1047 DependentCode* LookupWeakObjectToCodeDependency(Handle<HeapObject> obj); | 996 DependentCode* LookupWeakObjectToCodeDependency(Handle<HeapObject> obj); |
| 1048 | 997 |
| 1049 void AddRetainedMap(Handle<Map> map); | 998 void AddRetainedMap(Handle<Map> map); |
| 1050 | 999 |
| 1051 // This event is triggered after successful allocation of a new object made | 1000 // This event is triggered after successful allocation of a new object made |
| 1052 // by runtime. Allocations of target space for object evacuation do not | 1001 // by runtime. Allocations of target space for object evacuation do not |
| 1053 // trigger the event. In order to track ALL allocations one must turn off | 1002 // trigger the event. In order to track ALL allocations one must turn off |
| (...skipping 323 matching lines...) |
| 1377 // area and unused area). | 1326 // area and unused area). |
| 1378 bool Contains(Address addr); | 1327 bool Contains(Address addr); |
| 1379 bool Contains(HeapObject* value); | 1328 bool Contains(HeapObject* value); |
| 1380 | 1329 |
| 1381 // Checks whether an address/object is in a space. | 1330 // Checks whether an address/object is in a space. |
| 1382 // Currently used by tests, serialization and heap verification only. | 1331 // Currently used by tests, serialization and heap verification only. |
| 1383 bool InSpace(Address addr, AllocationSpace space); | 1332 bool InSpace(Address addr, AllocationSpace space); |
| 1384 bool InSpace(HeapObject* value, AllocationSpace space); | 1333 bool InSpace(HeapObject* value, AllocationSpace space); |
| 1385 | 1334 |
| 1386 // =========================================================================== | 1335 // =========================================================================== |
| 1336 // Object statistics tracking. =============================================== |
| 1337 // =========================================================================== |
| 1338 |
| 1339 // Returns the number of buckets used by object statistics tracking during a |
| 1340 // major GC. Note, however, that the following methods fail gracefully when |
| 1341 // the bounds are exceeded. |
| 1342 size_t NumberOfTrackedHeapObjectTypes(); |
| 1343 |
| 1344 // Returns object statistics about count and size at the last major GC. |
| 1345 // Objects are being grouped into buckets that roughly resemble existing |
| 1346 // instance types. |
| 1347 size_t ObjectCountAtLastGC(size_t index); |
| 1348 size_t ObjectSizeAtLastGC(size_t index); |
| 1349 |
| 1350 // Retrieves names of buckets used by object statistics tracking. |
| 1351 bool GetObjectTypeName(size_t index, const char** object_type, |
| 1352 const char** object_sub_type); |
| 1353 |
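The new public surface (NumberOfTrackedHeapObjectTypes, ObjectCountAtLastGC, ObjectSizeAtLastGC, GetObjectTypeName) lets callers walk the statistics buckets without knowing the internal layout. A hedged usage sketch follows; it assumes a valid Heap* from the surrounding code, and it assumes that GetObjectTypeName returns true only when a printable name exists for the bucket.

```cpp
// Hedged usage sketch of the new bucket API declared above; `heap` is assumed
// to be a valid Heap*, and the output format is illustrative only.
#include <cstdio>

void DumpObjectStatsAtLastGC(Heap* heap) {
  size_t buckets = heap->NumberOfTrackedHeapObjectTypes();
  for (size_t index = 0; index < buckets; ++index) {
    const char* object_type = nullptr;
    const char* object_sub_type = nullptr;
    // Assumption: a false return means no printable name for this bucket.
    if (!heap->GetObjectTypeName(index, &object_type, &object_sub_type)) {
      continue;
    }
    size_t count = heap->ObjectCountAtLastGC(index);
    if (count == 0) continue;  // out-of-range or unused buckets report 0
    std::printf("%s %s: count=%zu size=%zu\n", object_type, object_sub_type,
                count, heap->ObjectSizeAtLastGC(index));
  }
}
```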
| 1354 // =========================================================================== |
| 1387 // GC statistics. ============================================================ | 1355 // GC statistics. ============================================================ |
| 1388 // =========================================================================== | 1356 // =========================================================================== |
| 1389 | 1357 |
| 1390 // Returns the maximum amount of memory reserved for the heap. For | 1358 // Returns the maximum amount of memory reserved for the heap. For |
| 1391 // the young generation, we reserve 4 times the amount needed for a | 1359 // the young generation, we reserve 4 times the amount needed for a |
| 1392 // semi space. The young generation consists of two semi spaces and | 1360 // semi space. The young generation consists of two semi spaces and |
| 1393 // we reserve twice the amount needed for those in order to ensure | 1361 // we reserve twice the amount needed for those in order to ensure |
| 1394 // that new space can be aligned to its size. | 1362 // that new space can be aligned to its size. |
| 1395 intptr_t MaxReserved() { | 1363 intptr_t MaxReserved() { |
| 1396 return 4 * reserved_semispace_size_ + max_old_generation_size_; | 1364 return 4 * reserved_semispace_size_ + max_old_generation_size_; |
| (...skipping 436 matching lines...) |
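As a worked example of MaxReserved() above (the numbers are illustrative, not values guaranteed by this header): with reserved_semispace_size_ at 8 MB and max_old_generation_size_ at 700 MB, MaxReserved() returns 4 * 8 MB + 700 MB = 732 MB. The factor of four covers the two semispaces of new space plus the extra reservation needed to align new space to its size, as the comment explains.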
| 1833 bool PerformIdleTimeAction(GCIdleTimeAction action, | 1801 bool PerformIdleTimeAction(GCIdleTimeAction action, |
| 1834 GCIdleTimeHandler::HeapState heap_state, | 1802 GCIdleTimeHandler::HeapState heap_state, |
| 1835 double deadline_in_ms); | 1803 double deadline_in_ms); |
| 1836 | 1804 |
| 1837 void IdleNotificationEpilogue(GCIdleTimeAction action, | 1805 void IdleNotificationEpilogue(GCIdleTimeAction action, |
| 1838 GCIdleTimeHandler::HeapState heap_state, | 1806 GCIdleTimeHandler::HeapState heap_state, |
| 1839 double start_ms, double deadline_in_ms); | 1807 double start_ms, double deadline_in_ms); |
| 1840 void CheckAndNotifyBackgroundIdleNotification(double idle_time_in_ms, | 1808 void CheckAndNotifyBackgroundIdleNotification(double idle_time_in_ms, |
| 1841 double now_ms); | 1809 double now_ms); |
| 1842 | 1810 |
| 1843 void ClearObjectStats(bool clear_last_time_stats = false); | |
| 1844 | |
| 1845 inline void UpdateAllocationsHash(HeapObject* object); | 1811 inline void UpdateAllocationsHash(HeapObject* object); |
| 1846 inline void UpdateAllocationsHash(uint32_t value); | 1812 inline void UpdateAllocationsHash(uint32_t value); |
| 1847 void PrintAlloctionsHash(); | 1813 void PrintAlloctionsHash(); |
| 1848 | 1814 |
| 1849 void AddToRingBuffer(const char* string); | 1815 void AddToRingBuffer(const char* string); |
| 1850 void GetFromRingBuffer(char* buffer); | 1816 void GetFromRingBuffer(char* buffer); |
| 1851 | 1817 |
| 1852 // Attempt to over-approximate the weak closure by marking object groups and | 1818 // Attempt to over-approximate the weak closure by marking object groups and |
| 1853 // implicit references from global handles, but don't atomically complete | 1819 // implicit references from global handles, but don't atomically complete |
| 1854 // marking. If we continue to mark incrementally, we might have marked | 1820 // marking. If we continue to mark incrementally, we might have marked |
| (...skipping 412 matching lines...) |
| 2267 int nodes_died_in_new_space_; | 2233 int nodes_died_in_new_space_; |
| 2268 int nodes_copied_in_new_space_; | 2234 int nodes_copied_in_new_space_; |
| 2269 int nodes_promoted_; | 2235 int nodes_promoted_; |
| 2270 | 2236 |
| 2271 // This is the pretenuring trigger for allocation sites that are in maybe | 2237 // This is the pretenuring trigger for allocation sites that are in maybe |
| 2272 // tenure state. When we switch to the maximum new space size we deoptimize | 2238 // tenure state. When we switch to the maximum new space size we deoptimize |
| 2273 // the code that belongs to the allocation site and derive the lifetime | 2239 // the code that belongs to the allocation site and derive the lifetime |
| 2274 // of the allocation site. | 2240 // of the allocation site. |
| 2275 unsigned int maximum_size_scavenges_; | 2241 unsigned int maximum_size_scavenges_; |
| 2276 | 2242 |
| 2277 // Object counts and used memory by InstanceType | |
| 2278 size_t object_counts_[OBJECT_STATS_COUNT]; | |
| 2279 size_t object_counts_last_time_[OBJECT_STATS_COUNT]; | |
| 2280 size_t object_sizes_[OBJECT_STATS_COUNT]; | |
| 2281 size_t object_sizes_last_time_[OBJECT_STATS_COUNT]; | |
| 2282 | |
| 2283 // Maximum GC pause. | 2243 // Maximum GC pause. |
| 2284 double max_gc_pause_; | 2244 double max_gc_pause_; |
| 2285 | 2245 |
| 2286 // Total time spent in GC. | 2246 // Total time spent in GC. |
| 2287 double total_gc_time_ms_; | 2247 double total_gc_time_ms_; |
| 2288 | 2248 |
| 2289 // Maximum size of objects alive after GC. | 2249 // Maximum size of objects alive after GC. |
| 2290 intptr_t max_alive_after_gc_; | 2250 intptr_t max_alive_after_gc_; |
| 2291 | 2251 |
| 2292 // Minimal interval between two subsequent collections. | 2252 // Minimal interval between two subsequent collections. |
| (...skipping 14 matching lines...) |
| 2307 MarkCompactCollector mark_compact_collector_; | 2267 MarkCompactCollector mark_compact_collector_; |
| 2308 | 2268 |
| 2309 StoreBuffer store_buffer_; | 2269 StoreBuffer store_buffer_; |
| 2310 | 2270 |
| 2311 IncrementalMarking incremental_marking_; | 2271 IncrementalMarking incremental_marking_; |
| 2312 | 2272 |
| 2313 GCIdleTimeHandler gc_idle_time_handler_; | 2273 GCIdleTimeHandler gc_idle_time_handler_; |
| 2314 | 2274 |
| 2315 MemoryReducer* memory_reducer_; | 2275 MemoryReducer* memory_reducer_; |
| 2316 | 2276 |
| 2277 ObjectStats* object_stats_; |
| 2278 |
| 2317 // These two counters are monotonically increasing and never reset. | 2279 // These two counters are monotonically increasing and never reset. |
| 2318 size_t full_codegen_bytes_generated_; | 2280 size_t full_codegen_bytes_generated_; |
| 2319 size_t crankshaft_codegen_bytes_generated_; | 2281 size_t crankshaft_codegen_bytes_generated_; |
| 2320 | 2282 |
| 2321 // This counter is increased before each GC and never reset. | 2283 // This counter is increased before each GC and never reset. |
| 2322 // To account for the bytes allocated since the last GC, use the | 2284 // To account for the bytes allocated since the last GC, use the |
| 2323 // NewSpaceAllocationCounter() function. | 2285 // NewSpaceAllocationCounter() function. |
| 2324 size_t new_space_allocation_counter_; | 2286 size_t new_space_allocation_counter_; |
| 2325 | 2287 |
| 2326 // This counter is increased before each GC and never reset. To | 2288 // This counter is increased before each GC and never reset. To |
| (...skipping 452 matching lines...) |
| 2779 DisallowHeapAllocation no_allocation; // i.e. no gc allowed. | 2741 DisallowHeapAllocation no_allocation; // i.e. no gc allowed. |
| 2780 | 2742 |
| 2781 private: | 2743 private: |
| 2782 DISALLOW_IMPLICIT_CONSTRUCTORS(PathTracer); | 2744 DISALLOW_IMPLICIT_CONSTRUCTORS(PathTracer); |
| 2783 }; | 2745 }; |
| 2784 #endif // DEBUG | 2746 #endif // DEBUG |
| 2785 } | 2747 } |
| 2786 } // namespace v8::internal | 2748 } // namespace v8::internal |
| 2787 | 2749 |
| 2788 #endif // V8_HEAP_HEAP_H_ | 2750 #endif // V8_HEAP_HEAP_H_ |