Chromium Code Reviews| Index: src/heap/heap.h |
| diff --git a/src/heap/heap.h b/src/heap/heap.h |
| index af9d0a6235f0f40426f7062086e8c67eda03c4f0..b0e655570fc3c6889657aab6c2274719ca93f801 100644 |
| --- a/src/heap/heap.h |
| +++ b/src/heap/heap.h |
| @@ -638,6 +638,8 @@ class Heap { |
| enum PretenuringFeedbackInsertionMode { kCached, kGlobal }; |
| + enum FindMementoMode { kForRuntime, kForParallelEvacuation }; |
|
Hannes Payer (out of office)
2016/01/20 13:19:39
kForGC would be enough detail.
Michael Lippautz
2016/01/21 10:00:08
Done.
|
| + |
| enum HeapState { NOT_IN_GC, SCAVENGE, MARK_COMPACT }; |
| // Taking this lock prevents the GC from entering a phase that relocates |
| @@ -927,6 +929,7 @@ class Heap { |
| // If an object has an AllocationMemento trailing it, return it, otherwise |
| // return NULL; |
| + template <int find_memento_mode> |
|
Hannes Payer (out of office)
2016/01/20 13:19:39
| s/int find_memento_mode/Heap::FindMementoMode mode/ |
Michael Lippautz
2016/01/21 10:00:08
Done.
|
| inline AllocationMemento* FindAllocationMemento(HeapObject* object); |
| // Returns false if not able to reserve. |
| @@ -1408,13 +1411,13 @@ class Heap { |
| void UpdateSurvivalStatistics(int start_new_space_size); |
| - inline void IncrementPromotedObjectsSize(int object_size) { |
| + inline void IncrementPromotedObjectsSize(intptr_t object_size) { |
| DCHECK_GE(object_size, 0); |
| promoted_objects_size_ += object_size; |
| } |
| inline intptr_t promoted_objects_size() { return promoted_objects_size_; } |
| - inline void IncrementSemiSpaceCopiedObjectSize(int object_size) { |
| + inline void IncrementSemiSpaceCopiedObjectSize(intptr_t object_size) { |
| DCHECK_GE(object_size, 0); |
| semi_space_copied_object_size_ += object_size; |
| } |
| @@ -1432,8 +1435,8 @@ class Heap { |
| inline void IncrementNodesPromoted() { nodes_promoted_++; } |
| - inline void IncrementYoungSurvivorsCounter(int survived) { |
| - DCHECK(survived >= 0); |
| + inline void IncrementYoungSurvivorsCounter(intptr_t survived) { |
| + DCHECK_GE(survived, 0); |
| survived_last_scavenge_ = survived; |
| survived_since_last_expansion_ += survived; |
| } |
| @@ -2182,10 +2185,10 @@ class Heap { |
| // For keeping track of how much data has survived |
| // scavenge since last new space expansion. |
| - int survived_since_last_expansion_; |
| + intptr_t survived_since_last_expansion_; |
| // ... and since the last scavenge. |
| - int survived_last_scavenge_; |
| + intptr_t survived_last_scavenge_; |
| // This is not the depth of nested AlwaysAllocateScope's but rather a single |
| // count, as scopes can be acquired from multiple tasks (read: threads). |