Index: src/heap/heap.h
diff --git a/src/heap/heap.h b/src/heap/heap.h
index af9d0a6235f0f40426f7062086e8c67eda03c4f0..a0035bbc7103338edead9a46a7bd3a2be11701be 100644
--- a/src/heap/heap.h
+++ b/src/heap/heap.h
@@ -638,6 +638,8 @@ class Heap {
   enum PretenuringFeedbackInsertionMode { kCached, kGlobal };
+  enum FindMementoMode { kForRuntime, kForGC };
+
   enum HeapState { NOT_IN_GC, SCAVENGE, MARK_COMPACT };
   // Taking this lock prevents the GC from entering a phase that relocates
@@ -793,6 +795,8 @@ class Heap {
   // stored on the map to facilitate fast dispatch for {StaticVisitorBase}.
   static int GetStaticVisitorIdForMap(Map* map);
+  static inline uint32_t ObjectHash(Address address);
+
   // Notifies the heap that is ok to start marking or other activities that
   // should not happen during deserialization.
   void NotifyDeserializationComplete();
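
The new ObjectHash(Address) helper reduces a heap address to a 32-bit value of the kind used to key address-based hash tables. A minimal sketch of one common way to compute such a hash, assuming the usual trick of discarding the low alignment bits and applying a multiplicative mix; the function name, alignment constant, and mixing constant below are illustrative assumptions, not the actual V8 implementation:

// Illustrative sketch only: hash an object address by dropping the alignment
// bits and mixing with a multiplicative constant. Not V8's ObjectHash().
#include <cstdint>
#include <iostream>

static inline uint32_t SketchObjectHash(uintptr_t address) {
  // Heap objects are at least pointer-aligned, so the low bits carry no
  // information; shift them out before mixing.
  const uintptr_t kAlignmentBits = 3;  // assume 8-byte alignment
  uint32_t value = static_cast<uint32_t>(address >> kAlignmentBits);
  return value * 2654435761u;  // Knuth's multiplicative hash constant
}

int main() {
  std::cout << std::hex << SketchObjectHash(0x5c001230u) << "\n";
}
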
@@ -927,6 +931,7 @@ class Heap {
   // If an object has an AllocationMemento trailing it, return it, otherwise
   // return NULL;
+  template <FindMementoMode mode>
   inline AllocationMemento* FindAllocationMemento(HeapObject* object);
   // Returns false if not able to reserve.
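
FindAllocationMemento is now parameterized by the new FindMementoMode, so a caller picks the runtime or GC flavour at compile time and each instantiation only pays for the checks it needs. A hedged, standalone sketch of that dispatch pattern; the lookup body is invented for illustration and is not V8's memento lookup:

// Standalone sketch of compile-time dispatch on an enum template parameter,
// mirroring the shape of the new declaration. The logic is a placeholder.
#include <cstdint>
#include <iostream>

enum FindMementoMode { kForRuntime, kForGC };

struct AllocationMemento { /* placeholder */ };

template <FindMementoMode mode>
AllocationMemento* FindMementoSketch(uintptr_t candidate) {
  static AllocationMemento memento;
  if (candidate == 0) return nullptr;
  if (mode == kForGC) {
    // Hypothetically, a GC-time path would skip checks that depend on
    // bookkeeping that is not reliable mid-collection.
    return &memento;
  }
  // The runtime path can afford stricter validity checks.
  return (candidate % 8 == 0) ? &memento : nullptr;
}

int main() {
  std::cout << (FindMementoSketch<kForRuntime>(0x1000) != nullptr) << "\n";  // 1
  std::cout << (FindMementoSketch<kForGC>(0x1001) != nullptr) << "\n";       // 1
}
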
@@ -1408,13 +1413,13 @@ class Heap {
   void UpdateSurvivalStatistics(int start_new_space_size);
-  inline void IncrementPromotedObjectsSize(int object_size) {
+  inline void IncrementPromotedObjectsSize(intptr_t object_size) {
     DCHECK_GE(object_size, 0);
     promoted_objects_size_ += object_size;
   }
   inline intptr_t promoted_objects_size() { return promoted_objects_size_; }
-  inline void IncrementSemiSpaceCopiedObjectSize(int object_size) {
+  inline void IncrementSemiSpaceCopiedObjectSize(intptr_t object_size) {
     DCHECK_GE(object_size, 0);
     semi_space_copied_object_size_ += object_size;
   }
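
Widening the object_size parameters from int to intptr_t matters because these counters accumulate byte counts; once the running total approaches 2 GiB a 32-bit int would overflow. A small sketch of the failure mode, assuming a 64-bit build where intptr_t is 8 bytes:

// Sketch of the overflow the int -> intptr_t widening avoids. Summing a few
// thousand promoted-object sizes exceeds INT32_MAX, so a 32-bit accumulator
// would wrap (signed overflow is undefined behaviour), while intptr_t is
// pointer-width on x64/arm64 builds.
#include <cstdint>
#include <iostream>

int main() {
  const intptr_t object_size = 512 * 1024;  // 512 KiB per promoted object
  intptr_t total = 0;
  for (int i = 0; i < 5000; i++) total += object_size;  // ~2.4 GiB in total
  std::cout << "promoted bytes: " << total << "\n";                // 2621440000
  std::cout << "fits in int32:  " << (total <= INT32_MAX) << "\n"; // 0
}
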
@@ -1432,8 +1437,8 @@ class Heap {
   inline void IncrementNodesPromoted() { nodes_promoted_++; }
-  inline void IncrementYoungSurvivorsCounter(int survived) {
-    DCHECK(survived >= 0);
+  inline void IncrementYoungSurvivorsCounter(intptr_t survived) {
+    DCHECK_GE(survived, 0);
     survived_last_scavenge_ = survived;
     survived_since_last_expansion_ += survived;
   }
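
The switch from DCHECK(survived >= 0) to DCHECK_GE(survived, 0) keeps the assertion semantics but uses the binary-comparison form, which can report both operand values when it fires instead of only the stringified condition. A rough stand-in for how such a macro reports failures; MY_DCHECK_GE is a hypothetical local macro written for this sketch, not V8's own DCHECK_GE:

// Hypothetical stand-in for a DCHECK_GE-style macro: on failure it prints
// both operands, then aborts.
#include <cstdint>
#include <cstdio>
#include <cstdlib>

#define MY_DCHECK_GE(lhs, rhs)                                              \
  do {                                                                      \
    if (!((lhs) >= (rhs))) {                                                \
      std::fprintf(stderr, "Check failed: %s >= %s (%lld vs %lld)\n", #lhs, \
                   #rhs, static_cast<long long>(lhs),                       \
                   static_cast<long long>(rhs));                            \
      std::abort();                                                         \
    }                                                                       \
  } while (0)

int main() {
  intptr_t survived = -1;  // deliberately invalid so the check fires
  MY_DCHECK_GE(survived, 0);
  return 0;
}
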
@@ -2182,10 +2187,10 @@ class Heap {
   // For keeping track of how much data has survived
   // scavenge since last new space expansion.
-  int survived_since_last_expansion_;
+  intptr_t survived_since_last_expansion_;
   // ... and since the last scavenge.
-  int survived_last_scavenge_;
+  intptr_t survived_last_scavenge_;
   // This is not the depth of nested AlwaysAllocateScope's but rather a single
   // count, as scopes can be acquired from multiple tasks (read: threads).
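
The backing fields survived_since_last_expansion_ and survived_last_scavenge_ are widened to match the setters above, so nothing along the path truncates the stored byte counts. A trivial check of the widths involved, assuming a typical LP64 build:

// Sketch: the width difference behind the field change. On an LP64 build
// int stays at 4 bytes while intptr_t matches the 8-byte pointer width.
#include <cstdint>
#include <iostream>

int main() {
  std::cout << "sizeof(int)      = " << sizeof(int) << "\n";       // typically 4
  std::cout << "sizeof(intptr_t) = " << sizeof(intptr_t) << "\n";  // 8 on 64-bit
  std::cout << "sizeof(void*)    = " << sizeof(void*) << "\n";     // 8 on 64-bit
}
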