Index: src/heap/heap.h
diff --git a/src/heap/heap.h b/src/heap/heap.h
index bc356188a01392fefb735bd8367415cf1ac7af6e..07cd73469c71fde4b86a1ed177c5704da8198649 100644
--- a/src/heap/heap.h
+++ b/src/heap/heap.h
@@ -450,6 +450,8 @@ class Heap {

   enum PretenuringFeedbackInsertionMode { kCached, kGlobal };

+  enum FindMementoMode { kForRuntime, kForGC };
+
   enum HeapState { NOT_IN_GC, SCAVENGE, MARK_COMPACT };

   // Taking this lock prevents the GC from entering a phase that relocates
@@ -739,6 +741,7 @@ class Heap {

   // If an object has an AllocationMemento trailing it, return it, otherwise
   // return NULL;
+  template <FindMementoMode mode>
   inline AllocationMemento* FindAllocationMemento(HeapObject* object);

   // Returns false if not able to reserve.
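
Note: the new FindMementoMode template parameter lets each caller state at compile time whether FindAllocationMemento runs during regular JS execution (kForRuntime) or inside a GC pause (kForGC). The patch shown here contains only the declaration, so the per-mode behavior below is an assumption. A minimal, self-contained sketch of the dispatch pattern, with stand-in types and checks that are not the actual heap-inl.h implementation:

    #include <stdint.h>
    #include <stdio.h>

    enum FindMementoMode { kForRuntime, kForGC };

    struct AllocationMemento { intptr_t value; };

    struct Heap {
      intptr_t new_space_top = 100;      // stand-in for the real new-space top
      AllocationMemento memento = {42};  // stand-in for a trailing memento

      // `mode` is a non-type template parameter, so this branch is resolved
      // at compile time and each instantiation keeps only its own path.
      template <FindMementoMode mode>
      AllocationMemento* FindAllocationMemento(intptr_t candidate) {
        if (mode == kForGC) {
          // GC-time caller: the heap is in a known-safe state, so a cheap
          // bounds check is assumed to suffice (illustrative only).
          return candidate < new_space_top ? &memento : nullptr;
        }
        // Runtime caller: assumed to need extra defensive validation.
        if (candidate < 0 || candidate >= new_space_top) return nullptr;
        return &memento;
      }
    };

    int main() {
      Heap heap;
      printf("kForGC:      %p\n",
             static_cast<void*>(heap.FindAllocationMemento<kForGC>(10)));
      printf("kForRuntime: %p\n",
             static_cast<void*>(heap.FindAllocationMemento<kForRuntime>(-1)));
    }

Because the mode is a template argument rather than a run-time parameter, each call site pays nothing for the checks of the other path, which is the usual reason to prefer this form over a plain function argument.
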
@@ -1219,13 +1222,13 @@ class Heap {

   void UpdateSurvivalStatistics(int start_new_space_size);

-  inline void IncrementPromotedObjectsSize(int object_size) {
+  inline void IncrementPromotedObjectsSize(intptr_t object_size) {
     DCHECK_GE(object_size, 0);
     promoted_objects_size_ += object_size;
   }
   inline intptr_t promoted_objects_size() { return promoted_objects_size_; }

-  inline void IncrementSemiSpaceCopiedObjectSize(int object_size) {
+  inline void IncrementSemiSpaceCopiedObjectSize(intptr_t object_size) {
     DCHECK_GE(object_size, 0);
     semi_space_copied_object_size_ += object_size;
   }
@@ -1243,8 +1246,8 @@ class Heap {

   inline void IncrementNodesPromoted() { nodes_promoted_++; }

-  inline void IncrementYoungSurvivorsCounter(int survived) {
-    DCHECK(survived >= 0);
+  inline void IncrementYoungSurvivorsCounter(intptr_t survived) {
+    DCHECK_GE(survived, 0);
     survived_last_scavenge_ = survived;
     survived_since_last_expansion_ += survived;
   }
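
Note: the counters are widened because on 64-bit targets int is still 32 bits, while a busy heap can accumulate more than INT_MAX (about 2 GiB) of survived bytes between resets; intptr_t is pointer-sized, so it is 64 bits there. An illustrative, standalone sketch (not V8 code) of the failure mode being avoided, assuming a 64-bit build:

    #include <stdint.h>
    #include <stdio.h>

    int main() {
      // Suppose roughly 256 MiB of objects survive each of 16 scavenges.
      // (Assumes a 64-bit build, where intptr_t is 64 bits wide.)
      const intptr_t kStep = static_cast<intptr_t>(256) * 1024 * 1024;
      intptr_t survived = 0;  // this counter was a plain int before the patch
      for (int i = 0; i < 16; ++i) survived += kStep;
      // 4 GiB total: representable in a 64-bit intptr_t, but accumulating
      // the same amount in a 32-bit int would overflow (undefined behavior).
      printf("%jd bytes survived\n", static_cast<intmax_t>(survived));
    }

The DCHECK(survived >= 0) also becomes DCHECK_GE(survived, 0), matching the comparison-macro style used by the setters above.
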
@@ -1993,10 +1996,10 @@ class Heap {

   // For keeping track of how much data has survived
   // scavenge since last new space expansion.
-  int survived_since_last_expansion_;
+  intptr_t survived_since_last_expansion_;

   // ... and since the last scavenge.
-  int survived_last_scavenge_;
+  intptr_t survived_last_scavenge_;

   // This is not the depth of nested AlwaysAllocateScope's but rather a single
   // count, as scopes can be acquired from multiple tasks (read: threads).