Index: src/heap/spaces.h
diff --git a/src/heap/spaces.h b/src/heap/spaces.h
index 9a91abb537145b31a128c2a89c703244f1eca360..552eaa614477d5ae4009a0ae0e4d925133a3fc1e 100644
--- a/src/heap/spaces.h
+++ b/src/heap/spaces.h
@@ -268,6 +268,22 @@ class SlotsBuffer;
 // any heap object.
 class MemoryChunk {
  public:
+  // |kCompactingDone|: Initial compaction state of a |MemoryChunk|.
+  // |kCompactingFinalize|: Parallel compaction is done but the chunk needs to
+  // be finalized.
+  // |kCompactingInProgress|: Parallel compaction is currently in progress.
+  // |kCompactingAborted|: Parallel compaction has been aborted, which should
+  // for now only happen in OOM scenarios.
+  // |kNoEvacuationCandidate|: Page was not an evacuation candidate by the
+  // time the compactor inspected it.
+  enum ParallelCompactingState {
+    kCompactingDone,
+    kCompactingFinalize,
+    kCompactingInProgress,
+    kCompactingAborted,
+    kNoEvacuationCandidate,
+  };
+
   // Only works if the pointer is in the first kPageSize of the MemoryChunk.
   static MemoryChunk* FromAddress(Address a) {
     return reinterpret_cast<MemoryChunk*>(OffsetFrom(a) & ~kAlignmentMask);
@@ -458,6 +474,10 @@ class MemoryChunk {
     base::Release_Store(&parallel_sweeping_, state);
   }
+  AtomicValue<ParallelCompactingState>& parallel_compaction_state() {
+    return parallel_compaction_;
+  }
+
   bool TryLock() { return mutex_->TryLock(); }
   base::Mutex* mutex() { return mutex_; }
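The accessor above, together with the |ParallelCompactingState| enum from the first hunk, suggests a per-chunk claim/finalize protocol for parallel compaction tasks. Below is a minimal illustrative sketch, not part of this patch: EvacuateLiveObjects() is a hypothetical helper, and only the TrySetValue()/SetValue() calls mirror the AtomicValue interface already used for high_water_mark_ in this file.

  // Hypothetical compaction task claiming and processing a single chunk.
  void ProcessChunk(MemoryChunk* chunk) {
    // Claim the chunk; only one task can move it out of kCompactingDone.
    if (!chunk->parallel_compaction_state().TrySetValue(
            MemoryChunk::kCompactingDone, MemoryChunk::kCompactingInProgress)) {
      return;  // Some other task already owns this chunk.
    }
    if (EvacuateLiveObjects(chunk)) {
      // Compaction succeeded; the main thread still has to finalize the chunk.
      chunk->parallel_compaction_state().SetValue(
          MemoryChunk::kCompactingFinalize);
    } else {
      // Ran out of memory while copying objects.
      chunk->parallel_compaction_state().SetValue(
          MemoryChunk::kCompactingAborted);
    }
  }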
@@ -566,6 +586,7 @@ class MemoryChunk {
       + kPointerSize  // AtomicValue high_water_mark_
       + kPointerSize  // base::Mutex* mutex_
       + kPointerSize  // base::AtomicWord parallel_sweeping_
+      + kPointerSize  // AtomicValue parallel_compaction_
       + 5 * kPointerSize  // AtomicNumber free-list statistics
       + kPointerSize  // base::AtomicWord next_chunk_
       + kPointerSize;  // base::AtomicWord prev_chunk_
@@ -694,6 +715,20 @@ class MemoryChunk {
              !chunk->high_water_mark_.TrySetValue(old_mark, new_mark));
   }
+
+#define FRAGMENTATION_STATS_ACCESSORS(type, name) \
+  type name() { return name##_.Value(); } \
+  void set_##name(type name) { name##_.SetValue(name); } \
+  void add_##name(type name) { name##_.Increment(name); }
+
+  FRAGMENTATION_STATS_ACCESSORS(intptr_t, non_available_small_blocks)
+  FRAGMENTATION_STATS_ACCESSORS(intptr_t, available_in_small_free_list)
+  FRAGMENTATION_STATS_ACCESSORS(intptr_t, available_in_medium_free_list)
+  FRAGMENTATION_STATS_ACCESSORS(intptr_t, available_in_large_free_list)
+  FRAGMENTATION_STATS_ACCESSORS(intptr_t, available_in_huge_free_list)
+
+#undef FRAGMENTATION_STATS_ACCESSORS
+
  protected:
   size_t size_;
   intptr_t flags_;
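For readers skimming the patch, one invocation of the accessor macro above, e.g. FRAGMENTATION_STATS_ACCESSORS(intptr_t, available_in_small_free_list), expands to roughly the following (illustrative expansion only; it assumes the AtomicNumber<intptr_t> backing fields declared in the next hunk):

  intptr_t available_in_small_free_list() {
    return available_in_small_free_list_.Value();
  }
  void set_available_in_small_free_list(intptr_t value) {
    available_in_small_free_list_.SetValue(value);
  }
  void add_available_in_small_free_list(intptr_t value) {
    available_in_small_free_list_.Increment(value);
  }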
@@ -726,6 +761,7 @@ class MemoryChunk {
   base::Mutex* mutex_;
   base::AtomicWord parallel_sweeping_;
+  AtomicValue<ParallelCompactingState> parallel_compaction_;
   // PagedSpace free-list statistics.
   AtomicNumber<intptr_t> available_in_small_free_list_;
@@ -1021,6 +1057,10 @@ class CodeRange {
   // access both lists concurrently to the main thread.
   base::Mutex code_range_mutex_;
+  // ReserveBlock can be called concurrently. TODO: Merge this into a single
+  // lock for CodeRange.
+  base::Mutex reserve_block_mutex_;
+
   // Freed blocks of memory are added to the free list. When the allocation
   // list is exhausted, the free list is sorted and merged to make the new
   // allocation list.
@@ -1613,6 +1653,9 @@ class FreeList {
   intptr_t Concatenate(FreeList* free_list);
+  // Evenly divide the available blocks among the {num} free lists in
+  // {free_lists_to_fill}.
+  void Divide(FreeList** free_lists_to_fill, size_t num);
+
   // Clear the free list.
   void Reset();
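A hypothetical call site, only to illustrate the contract of Divide(); the free-list names are illustrative and do not appear in this patch:

  // Split one space's free memory across two compaction-local free lists.
  FreeList* fill[] = {compaction_free_list_a, compaction_free_list_b};
  source_free_list->Divide(fill, arraysize(fill));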
@@ -1673,6 +1716,7 @@ class FreeList {
   FreeListCategory* large_list() { return &large_list_; }
   FreeListCategory* huge_list() { return &huge_list_; }
+
  private:
   // The size range of blocks, in bytes.
   static const int kMinBlockSize = 3 * kPointerSize;
@@ -1695,6 +1739,8 @@ class FreeList {
   FreeListCategory large_list_;
   FreeListCategory huge_list_;
+  friend class CompactionSpace;
+
   DISALLOW_IMPLICIT_CONSTRUCTORS(FreeList);
 };
@@ -2744,6 +2790,40 @@ class CompactionSpace : public PagedSpace {
 };
+class CompactionSpaces : public Malloced {
+ public:
+  explicit CompactionSpaces(Heap* heap)
+      : old_space_(new CompactionSpace(heap, OLD_SPACE,
+                                       Executability::NOT_EXECUTABLE)),
+        code_space_(
+            new CompactionSpace(heap, CODE_SPACE, Executability::EXECUTABLE)) {}
+
+  ~CompactionSpaces() {
+    delete old_space_;
+    delete code_space_;
+  }
+
+  CompactionSpace* Get(AllocationSpace space) {
+    switch (space) {
+      case OLD_SPACE:
+        DCHECK(old_space_ != nullptr);
+        return old_space_;
+      case CODE_SPACE:
+        DCHECK(code_space_ != nullptr);
+        return code_space_;
+      default:
+        UNREACHABLE();
+    }
+    UNREACHABLE();
+    return nullptr;
+  }
+
+ private:
+  CompactionSpace* old_space_;
+  CompactionSpace* code_space_;
+};
+
+
 // -----------------------------------------------------------------------------
 // Old object space (includes the old space of objects and code space)
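A minimal usage sketch for the new CompactionSpaces container; the surrounding compaction task that would own it is assumed here and is not part of this patch:

  // Each parallel compaction task allocates into its own set of spaces.
  CompactionSpaces* compaction_spaces = new CompactionSpaces(heap);
  CompactionSpace* old_space = compaction_spaces->Get(OLD_SPACE);
  CompactionSpace* code_space = compaction_spaces->Get(CODE_SPACE);
  // ... evacuate live objects into old_space / code_space ...
  delete compaction_spaces;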