Index: src/heap/heap.h
diff --git a/src/heap/heap.h b/src/heap/heap.h
index 7147529e497c25dfec7ac508eecc08ec644a2558..3c17dc7d9fbfca808679178857ca57e3e1fa96f4 100644
--- a/src/heap/heap.h
+++ b/src/heap/heap.h
@@ -641,8 +641,6 @@
  enum PretenuringFeedbackInsertionMode { kCached, kGlobal };
- enum FindMementoMode { kForRuntime, kForGC };
-
  enum HeapState { NOT_IN_GC, SCAVENGE, MARK_COMPACT };
  // Taking this lock prevents the GC from entering a phase that relocates
@@ -932,7 +930,6 @@ |
// If an object has an AllocationMemento trailing it, return it, otherwise |
// return NULL; |
- template <FindMementoMode mode> |
inline AllocationMemento* FindAllocationMemento(HeapObject* object); |
// Returns false if not able to reserve. |
@@ -1414,13 +1411,13 @@ |
void UpdateSurvivalStatistics(int start_new_space_size); |
- inline void IncrementPromotedObjectsSize(intptr_t object_size) { |
+ inline void IncrementPromotedObjectsSize(int object_size) { |
DCHECK_GE(object_size, 0); |
promoted_objects_size_ += object_size; |
} |
inline intptr_t promoted_objects_size() { return promoted_objects_size_; } |
- inline void IncrementSemiSpaceCopiedObjectSize(intptr_t object_size) { |
+ inline void IncrementSemiSpaceCopiedObjectSize(int object_size) { |
DCHECK_GE(object_size, 0); |
semi_space_copied_object_size_ += object_size; |
} |
@@ -1438,8 +1435,8 @@ |
inline void IncrementNodesPromoted() { nodes_promoted_++; } |
- inline void IncrementYoungSurvivorsCounter(intptr_t survived) {
+ inline void IncrementYoungSurvivorsCounter(int survived) {
   DCHECK_GE(survived, 0);
survived_last_scavenge_ = survived; |
survived_since_last_expansion_ += survived; |
} |
@@ -2188,10 +2185,10 @@ |
// For keeping track of how much data has survived |
// scavenge since last new space expansion. |
- intptr_t survived_since_last_expansion_; |
+ int survived_since_last_expansion_; |
// ... and since the last scavenge. |
- intptr_t survived_last_scavenge_; |
+ int survived_last_scavenge_; |
// This is not the depth of nested AlwaysAllocateScope's but rather a single |
// count, as scopes can be acquired from multiple tasks (read: threads). |
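
The counter hunks above narrow the per-scavenge bookkeeping from intptr_t to
int, presumably because survivor bytes from a single scavenge are bounded by
the new-space (semispace) capacity, while lifetime totals such as
promoted_objects_size_ stay pointer-sized. Below is a minimal standalone
sketch of that pattern, not V8 source: the SurvivalCounters class and its
main() driver are illustrative only, and plain assert() stands in for V8's
DCHECK_GE macro.

  // survival_counters_sketch.cc -- hypothetical example, not V8 code.
  #include <cassert>
  #include <cstdint>
  #include <cstdio>

  class SurvivalCounters {
   public:
    // Per-scavenge amounts fit in an int; assert() replaces DCHECK_GE here.
    void IncrementPromotedObjectsSize(int object_size) {
      assert(object_size >= 0);
      promoted_objects_size_ += object_size;  // widens back to intptr_t
    }

    void IncrementYoungSurvivorsCounter(int survived) {
      assert(survived >= 0);
      survived_last_scavenge_ = survived;
      survived_since_last_expansion_ += survived;
    }

    std::intptr_t promoted_objects_size() const {
      return promoted_objects_size_;
    }

   private:
    // The lifetime total accumulates across many scavenges, so it keeps the
    // wider pointer-sized type; the since-last-expansion counter is reset
    // whenever new space grows, so int suffices for it.
    std::intptr_t promoted_objects_size_ = 0;
    int survived_since_last_expansion_ = 0;
    int survived_last_scavenge_ = 0;
  };

  int main() {
    SurvivalCounters counters;
    counters.IncrementPromotedObjectsSize(4096);
    counters.IncrementYoungSurvivorsCounter(8192);
    std::printf("promoted so far: %lld bytes\n",
                static_cast<long long>(counters.promoted_objects_size()));
  }

Note how the sketch mirrors the hunks: setters take int, yet the getter still
returns intptr_t, so accumulation into the lifetime total is unaffected by
the narrower parameter type.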