OLD | NEW |
1 // Copyright 2012 the V8 project authors. All rights reserved. | 1 // Copyright 2012 the V8 project authors. All rights reserved. |
2 // Use of this source code is governed by a BSD-style license that can be | 2 // Use of this source code is governed by a BSD-style license that can be |
3 // found in the LICENSE file. | 3 // found in the LICENSE file. |
4 | 4 |
5 #ifndef V8_HEAP_HEAP_H_ | 5 #ifndef V8_HEAP_HEAP_H_ |
6 #define V8_HEAP_HEAP_H_ | 6 #define V8_HEAP_HEAP_H_ |
7 | 7 |
8 #include <cmath> | 8 #include <cmath> |
9 #include <map> | 9 #include <map> |
10 | 10 |
(...skipping 432 matching lines...)
443 }; | 443 }; |
444 | 444 |
445 // Indicates whether live bytes adjustment is triggered | 445 // Indicates whether live bytes adjustment is triggered |
446 // - from within the GC code before sweeping started (SEQUENTIAL_TO_SWEEPER), | 446 // - from within the GC code before sweeping started (SEQUENTIAL_TO_SWEEPER), |
447 // - or from within GC (CONCURRENT_TO_SWEEPER), | 447 // - or from within GC (CONCURRENT_TO_SWEEPER), |
448 // - or mutator code (CONCURRENT_TO_SWEEPER). | 448 // - or mutator code (CONCURRENT_TO_SWEEPER). |
449 enum InvocationMode { SEQUENTIAL_TO_SWEEPER, CONCURRENT_TO_SWEEPER }; | 449 enum InvocationMode { SEQUENTIAL_TO_SWEEPER, CONCURRENT_TO_SWEEPER }; |
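As the comment above indicates, SEQUENTIAL_TO_SWEEPER covers adjustments made before concurrent sweeping has started, while CONCURRENT_TO_SWEEPER covers adjustments that may race with sweeper threads. The standalone sketch below is not V8 code: PageCounters, AdjustLiveBytesSketch, and the choice of a relaxed atomic are assumptions. It only illustrates the kind of decision such a mode parameter typically drives, namely a plain update when nothing else can touch the counter and a synchronized one otherwise.

#include <atomic>
#include <cstdint>

enum InvocationMode { SEQUENTIAL_TO_SWEEPER, CONCURRENT_TO_SWEEPER };

// Hypothetical per-page counters; the real bookkeeping lives elsewhere.
struct PageCounters {
  int64_t live_bytes = 0;                         // main-thread-only counter
  std::atomic<int64_t> concurrent_live_bytes{0};  // shared with sweeper threads
};

void AdjustLiveBytesSketch(PageCounters* page, int64_t by, InvocationMode mode) {
  if (mode == SEQUENTIAL_TO_SWEEPER) {
    // Sweeping has not started, so no other thread updates the counter.
    page->live_bytes += by;
  } else {
    // Concurrent sweeper threads may update the counter at the same time.
    page->concurrent_live_bytes.fetch_add(by, std::memory_order_relaxed);
  }
}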
450 | 450 |
451 enum PretenuringFeedbackInsertionMode { kCached, kGlobal }; | 451 enum PretenuringFeedbackInsertionMode { kCached, kGlobal }; |
452 | 452 |
| 453 enum FindMementoMode { kForRuntime, kForGC }; |
| 454 |
453 enum HeapState { NOT_IN_GC, SCAVENGE, MARK_COMPACT }; | 455 enum HeapState { NOT_IN_GC, SCAVENGE, MARK_COMPACT }; |
454 | 456 |
455 // Taking this lock prevents the GC from entering a phase that relocates | 457 // Taking this lock prevents the GC from entering a phase that relocates |
456 // object references. | 458 // object references. |
457 class RelocationLock { | 459 class RelocationLock { |
458 public: | 460 public: |
459 explicit RelocationLock(Heap* heap) : heap_(heap) { | 461 explicit RelocationLock(Heap* heap) : heap_(heap) { |
460 heap_->relocation_mutex_.Lock(); | 462 heap_->relocation_mutex_.Lock(); |
461 } | 463 } |
462 | 464 |
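RelocationLock is a scoped (RAII) guard: the constructor takes the relocation mutex and the destructor releases it, so "no relocation may happen" is tied to a lexical scope. A minimal standalone sketch of the same pattern, assuming only a plain std::mutex rather than V8's mutex type:

#include <mutex>

class RelocationLockSketch {
 public:
  explicit RelocationLockSketch(std::mutex* relocation_mutex)
      : relocation_mutex_(relocation_mutex) {
    relocation_mutex_->lock();  // blocks while a relocating GC phase holds it
  }
  ~RelocationLockSketch() { relocation_mutex_->unlock(); }

 private:
  std::mutex* relocation_mutex_;
};

void WorkOnRawAddresses(std::mutex* heap_relocation_mutex) {
  RelocationLockSketch lock(heap_relocation_mutex);
  // Raw object addresses remain valid here: relocation cannot start.
}  // Destructor runs; relocation may proceed again.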
(...skipping 269 matching lines...)
732 | 734 |
733 // Print short heap statistics. | 735 // Print short heap statistics. |
734 void PrintShortHeapStatistics(); | 736 void PrintShortHeapStatistics(); |
735 | 737 |
736 inline HeapState gc_state() { return gc_state_; } | 738 inline HeapState gc_state() { return gc_state_; } |
737 | 739 |
738 inline bool IsInGCPostProcessing() { return gc_post_processing_depth_ > 0; } | 740 inline bool IsInGCPostProcessing() { return gc_post_processing_depth_ > 0; } |
739 | 741 |
740 // If an object has an AllocationMemento trailing it, return it, otherwise | 742 // If an object has an AllocationMemento trailing it, return it, otherwise |
741 // return NULL. | 743 // return NULL. |
| 744 template <FindMementoMode mode> |
742 inline AllocationMemento* FindAllocationMemento(HeapObject* object); | 745 inline AllocationMemento* FindAllocationMemento(HeapObject* object); |
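The new FindMementoMode template parameter lets the memento lookup be instantiated separately for runtime callers and for the GC. The sketch below is not V8 code: MementoSketch and the particular check each mode performs are assumptions. It only shows the shape of a lookup templated on an enum mode, where the branch on the mode is fixed at compile time so each instantiation pays only for the checks it needs.

#include <iostream>

enum FindMementoMode { kForRuntime, kForGC };

struct MementoSketch { bool fully_initialized; };

template <FindMementoMode mode>
MementoSketch* FindMementoSketch(MementoSketch* candidate) {
  if (candidate == nullptr) return nullptr;
  if (mode == kForRuntime && !candidate->fully_initialized) {
    // In this sketch, runtime callers reject a half-built memento while GC
    // callers tolerate it; the condition on `mode` is constant-folded.
    return nullptr;
  }
  return candidate;
}

int main() {
  MementoSketch m{false};
  std::cout << (FindMementoSketch<kForRuntime>(&m) != nullptr) << "\n";  // 0
  std::cout << (FindMementoSketch<kForGC>(&m) != nullptr) << "\n";       // 1
  return 0;
}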
743 | 746 |
744 // Returns false if not able to reserve. | 747 // Returns false if not able to reserve. |
745 bool ReserveSpace(Reservation* reservations); | 748 bool ReserveSpace(Reservation* reservations); |
746 | 749 |
747 // | 750 // |
748 // Support for the API. | 751 // Support for the API. |
749 // | 752 // |
750 | 753 |
751 void CreateApiObjects(); | 754 void CreateApiObjects(); |
(...skipping 460 matching lines...)
1212 // Returns the available bytes in space w/o growing. | 1215 // Returns the available bytes in space w/o growing. |
1213 // Heap doesn't guarantee that it can allocate an object that requires | 1216 // Heap doesn't guarantee that it can allocate an object that requires |
1214 // all available bytes. Check MaxHeapObjectSize() instead. | 1217 // all available bytes. Check MaxHeapObjectSize() instead. |
1215 intptr_t Available(); | 1218 intptr_t Available(); |
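In other words, Available() reports total free space, but a single allocation is also bounded by the largest object the heap can place in one piece. A minimal sketch of the check a caller would make; kMaxRegularObjectSizeSketch is a hypothetical stand-in for whatever MaxHeapObjectSize() actually returns:

#include <cstdint>

// Hypothetical per-object limit; the real value comes from MaxHeapObjectSize().
constexpr intptr_t kMaxRegularObjectSizeSketch = 512 * 1024;

bool CanAllocateInOneObjectSketch(intptr_t requested_size, intptr_t available) {
  // Total free space is necessary but not sufficient: a single object is also
  // limited by the largest contiguous chunk one page can provide.
  return requested_size <= kMaxRegularObjectSizeSketch &&
         requested_size <= available;
}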
1216 | 1219 |
1217 // Returns the size of all objects residing in the heap. | 1220 // Returns the size of all objects residing in the heap. |
1218 intptr_t SizeOfObjects(); | 1221 intptr_t SizeOfObjects(); |
1219 | 1222 |
1220 void UpdateSurvivalStatistics(int start_new_space_size); | 1223 void UpdateSurvivalStatistics(int start_new_space_size); |
1221 | 1224 |
1222 inline void IncrementPromotedObjectsSize(int object_size) { | 1225 inline void IncrementPromotedObjectsSize(intptr_t object_size) { |
1223 DCHECK_GE(object_size, 0); | 1226 DCHECK_GE(object_size, 0); |
1224 promoted_objects_size_ += object_size; | 1227 promoted_objects_size_ += object_size; |
1225 } | 1228 } |
1226 inline intptr_t promoted_objects_size() { return promoted_objects_size_; } | 1229 inline intptr_t promoted_objects_size() { return promoted_objects_size_; } |
1227 | 1230 |
1228 inline void IncrementSemiSpaceCopiedObjectSize(int object_size) { | 1231 inline void IncrementSemiSpaceCopiedObjectSize(intptr_t object_size) { |
1229 DCHECK_GE(object_size, 0); | 1232 DCHECK_GE(object_size, 0); |
1230 semi_space_copied_object_size_ += object_size; | 1233 semi_space_copied_object_size_ += object_size; |
1231 } | 1234 } |
1232 inline intptr_t semi_space_copied_object_size() { | 1235 inline intptr_t semi_space_copied_object_size() { |
1233 return semi_space_copied_object_size_; | 1236 return semi_space_copied_object_size_; |
1234 } | 1237 } |
1235 | 1238 |
1236 inline intptr_t SurvivedNewSpaceObjectSize() { | 1239 inline intptr_t SurvivedNewSpaceObjectSize() { |
1237 return promoted_objects_size_ + semi_space_copied_object_size_; | 1240 return promoted_objects_size_ + semi_space_copied_object_size_; |
1238 } | 1241 } |
1239 | 1242 |
1240 inline void IncrementNodesDiedInNewSpace() { nodes_died_in_new_space_++; } | 1243 inline void IncrementNodesDiedInNewSpace() { nodes_died_in_new_space_++; } |
1241 | 1244 |
1242 inline void IncrementNodesCopiedInNewSpace() { nodes_copied_in_new_space_++; } | 1245 inline void IncrementNodesCopiedInNewSpace() { nodes_copied_in_new_space_++; } |
1243 | 1246 |
1244 inline void IncrementNodesPromoted() { nodes_promoted_++; } | 1247 inline void IncrementNodesPromoted() { nodes_promoted_++; } |
1245 | 1248 |
1246 inline void IncrementYoungSurvivorsCounter(int survived) { | 1249 inline void IncrementYoungSurvivorsCounter(intptr_t survived) { |
1247 DCHECK(survived >= 0); | 1250 DCHECK_GE(survived, 0); |
1248 survived_last_scavenge_ = survived; | 1251 survived_last_scavenge_ = survived; |
1249 survived_since_last_expansion_ += survived; | 1252 survived_since_last_expansion_ += survived; |
1250 } | 1253 } |
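Taken together, these counters describe one scavenge: bytes promoted to old space plus bytes copied within the semispaces give the survivor total, which then feeds the heuristics for growing new space. The standalone sketch below mirrors the accessor names above but is a simplified stand-in, not V8 code; it uses intptr_t throughout for the same overflow reason this patch widens the counters from int.

#include <cstdint>

struct SurvivalStatsSketch {
  intptr_t promoted_objects_size = 0;
  intptr_t semi_space_copied_object_size = 0;
  intptr_t survived_last_scavenge = 0;
  intptr_t survived_since_last_expansion = 0;

  void OnObjectPromoted(intptr_t size) { promoted_objects_size += size; }
  void OnObjectCopiedWithinNewSpace(intptr_t size) {
    semi_space_copied_object_size += size;
  }

  // Everything that survived a scavenge either moved to old space or was
  // copied to the other semispace.
  intptr_t SurvivedNewSpaceObjectSize() const {
    return promoted_objects_size + semi_space_copied_object_size;
  }

  // The survivor total is recorded per scavenge and accumulated until the
  // next new-space expansion.
  void OnScavengeFinished() {
    intptr_t survived = SurvivedNewSpaceObjectSize();
    survived_last_scavenge = survived;
    survived_since_last_expansion += survived;
  }
};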
1251 | 1254 |
1252 inline intptr_t PromotedTotalSize() { | 1255 inline intptr_t PromotedTotalSize() { |
1253 int64_t total = PromotedSpaceSizeOfObjects() + PromotedExternalMemorySize(); | 1256 int64_t total = PromotedSpaceSizeOfObjects() + PromotedExternalMemorySize(); |
1254 if (total > std::numeric_limits<intptr_t>::max()) { | 1257 if (total > std::numeric_limits<intptr_t>::max()) { |
1255 // TODO(erikcorry): Use uintptr_t everywhere we do heap size calculations. | 1258 // TODO(erikcorry): Use uintptr_t everywhere we do heap size calculations. |
1256 return std::numeric_limits<intptr_t>::max(); | 1259 return std::numeric_limits<intptr_t>::max(); |
1257 } | 1260 } |
(...skipping 728 matching lines...)
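PromotedTotalSize, shown just before the skipped region above, computes its sum in int64_t and clamps it to the largest intptr_t so a 32-bit embedder never sees a wrapped, negative total. A minimal sketch of that saturation pattern, assuming only that the two inputs are int64_t byte counts:

#include <cstdint>
#include <limits>

intptr_t SaturatingHeapTotalSketch(int64_t promoted_space_size,
                                   int64_t promoted_external_size) {
  int64_t total = promoted_space_size + promoted_external_size;
  if (total > std::numeric_limits<intptr_t>::max()) {
    // The true total does not fit in intptr_t; report the maximum
    // representable value instead of wrapping around.
    return std::numeric_limits<intptr_t>::max();
  }
  return static_cast<intptr_t>(total);
}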
1986 int initial_semispace_size_; | 1989 int initial_semispace_size_; |
1987 int target_semispace_size_; | 1990 int target_semispace_size_; |
1988 intptr_t max_old_generation_size_; | 1991 intptr_t max_old_generation_size_; |
1989 intptr_t initial_old_generation_size_; | 1992 intptr_t initial_old_generation_size_; |
1990 bool old_generation_size_configured_; | 1993 bool old_generation_size_configured_; |
1991 intptr_t max_executable_size_; | 1994 intptr_t max_executable_size_; |
1992 intptr_t maximum_committed_; | 1995 intptr_t maximum_committed_; |
1993 | 1996 |
1994 // For keeping track of how much data has survived | 1997 // For keeping track of how much data has survived |
1995 // scavenge since last new space expansion. | 1998 // scavenge since last new space expansion. |
1996 int survived_since_last_expansion_; | 1999 intptr_t survived_since_last_expansion_; |
1997 | 2000 |
1998 // ... and since the last scavenge. | 2001 // ... and since the last scavenge. |
1999 int survived_last_scavenge_; | 2002 intptr_t survived_last_scavenge_; |
2000 | 2003 |
2001 // This is not the depth of nested AlwaysAllocateScopes but rather a single | 2004 // This is not the depth of nested AlwaysAllocateScopes but rather a single |
2002 // count, as scopes can be acquired from multiple tasks (read: threads). | 2005 // count, as scopes can be acquired from multiple tasks (read: threads). |
2003 AtomicNumber<size_t> always_allocate_scope_count_; | 2006 AtomicNumber<size_t> always_allocate_scope_count_; |
2004 | 2007 |
2005 // For keeping track of context disposals. | 2008 // For keeping track of context disposals. |
2006 int contexts_disposed_; | 2009 int contexts_disposed_; |
2007 | 2010 |
2008 // The length of the retained_maps array at the time of context disposal. | 2011 // The length of the retained_maps array at the time of context disposal. |
2009 // This separates maps in the retained_maps array that were created before | 2012 // This separates maps in the retained_maps array that were created before |
(...skipping 642 matching lines...)
2652 | 2655 |
2653 private: | 2656 private: |
2654 friend class NewSpace; | 2657 friend class NewSpace; |
2655 DISALLOW_COPY_AND_ASSIGN(InlineAllocationObserver); | 2658 DISALLOW_COPY_AND_ASSIGN(InlineAllocationObserver); |
2656 }; | 2659 }; |
2657 | 2660 |
2658 } // namespace internal | 2661 } // namespace internal |
2659 } // namespace v8 | 2662 } // namespace v8 |
2660 | 2663 |
2661 #endif // V8_HEAP_HEAP_H_ | 2664 #endif // V8_HEAP_HEAP_H_ |