Index: src/heap/spaces.h
diff --git a/src/heap/spaces.h b/src/heap/spaces.h
index 9a91abb537145b31a128c2a89c703244f1eca360..3c3157ec432241a3cb9c1b92dfa0f35db9d63c63 100644
--- a/src/heap/spaces.h
+++ b/src/heap/spaces.h
@@ -268,6 +268,19 @@ class SlotsBuffer;
 // any heap object.
 class MemoryChunk {
  public:
+  // |kCompactingDone|: Initial compaction state of a |MemoryChunk|.
+  // |kCompactingInProgress|: Parallel compaction is currently in progress.
+  // |kCompactingFinalize|: Parallel compaction is done but the chunk needs to
+  //   be finalized.
+  // |kCompactingAborted|: Parallel compaction has been aborted, which should
+  //   for now only happen in OOM scenarios.
+  enum ParallelCompactingState {
+    kCompactingDone,
+    kCompactingInProgress,
+    kCompactingFinalize,
+    kCompactingAborted,
+  };
+
   // Only works if the pointer is in the first kPageSize of the MemoryChunk.
   static MemoryChunk* FromAddress(Address a) {
     return reinterpret_cast<MemoryChunk*>(OffsetFrom(a) & ~kAlignmentMask);
@@ -458,6 +471,10 @@ class MemoryChunk {
     base::Release_Store(&parallel_sweeping_, state);
   }
 
+  AtomicValue<ParallelCompactingState>& parallel_compaction_state() {
+    return parallel_compaction_;
+  }
+
   bool TryLock() { return mutex_->TryLock(); }
 
   base::Mutex* mutex() { return mutex_; }
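
For context, the new state is meant to be driven by the compaction tasks roughly as sketched below. This is not part of the patch: EvacuateLiveObjects() is a hypothetical placeholder, and the sketch assumes AtomicValue exposes Value()/SetValue()/TrySetValue() as declared in src/base/atomic-utils.h.

    // Hypothetical task-side driver, shown only to illustrate the intended
    // kCompactingDone -> kCompactingInProgress -> kCompactingFinalize /
    // kCompactingAborted transitions.
    void CompactChunk(MemoryChunk* chunk) {
      // Claim the chunk; another task may already be compacting it.
      if (!chunk->parallel_compaction_state().TrySetValue(
              MemoryChunk::kCompactingDone,
              MemoryChunk::kCompactingInProgress)) {
        return;
      }
      // Placeholder for the actual evacuation loop; it is assumed to fail
      // only when no memory is left for target pages (the OOM case).
      bool success = EvacuateLiveObjects(chunk);
      // Hand the chunk back for finalization on the main thread, or mark it
      // aborted so the main thread can recover from the OOM case.
      chunk->parallel_compaction_state().SetValue(
          success ? MemoryChunk::kCompactingFinalize
                  : MemoryChunk::kCompactingAborted);
    }
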
@@ -566,6 +583,7 @@ class MemoryChunk {
       + kPointerSize      // AtomicValue high_water_mark_
       + kPointerSize      // base::Mutex* mutex_
       + kPointerSize      // base::AtomicWord parallel_sweeping_
+      + kPointerSize      // AtomicValue parallel_compaction_
       + 5 * kPointerSize  // AtomicNumber free-list statistics
       + kPointerSize      // base::AtomicWord next_chunk_
       + kPointerSize;     // base::AtomicWord prev_chunk_
@@ -726,6 +744,7 @@ class MemoryChunk {
 
   base::Mutex* mutex_;
   base::AtomicWord parallel_sweeping_;
+  AtomicValue<ParallelCompactingState> parallel_compaction_;
 
   // PagedSpace free-list statistics.
   AtomicNumber<intptr_t> available_in_small_free_list_;
@@ -986,9 +1005,6 @@ class CodeRange {
   bool UncommitRawMemory(Address start, size_t length);
   void FreeRawMemory(Address buf, size_t length);
 
-  void ReserveEmergencyBlock();
-  void ReleaseEmergencyBlock();
-
  private:
   // Frees the range of virtual memory, and frees the data structures used to
   // manage it.
@@ -1031,12 +1047,6 @@ class CodeRange {
   List<FreeBlock> allocation_list_;
   int current_allocation_block_index_;
 
-  // Emergency block guarantees that we can always allocate a page for
-  // evacuation candidates when code space is compacted. Emergency block is
-  // reserved immediately after GC and is released immedietely before
-  // allocating a page for evacuation.
-  FreeBlock emergency_block_;
-
   // Finds a block on the allocation list that contains at least the
   // requested amount of memory. If none is found, sorts and merges
   // the existing free memory blocks, and searches again.
@@ -1969,17 +1979,12 @@ class PagedSpace : public Space {
   // Return size of allocatable area on a page in this space.
   inline int AreaSize() { return area_size_; }
 
-  void CreateEmergencyMemory();
-  void FreeEmergencyMemory();
-  void UseEmergencyMemory();
-  intptr_t MaxEmergencyMemoryAllocated();
-
-  bool HasEmergencyMemory() { return emergency_memory_ != NULL; }
-
   // Merges {other} into the current space. Note that this modifies {other},
   // e.g., removes its bump pointer area and resets statistics.
   void MergeCompactionSpace(CompactionSpace* other);
 
+  void MoveOverFreeMemory(PagedSpace* other);
+
  protected:
   // PagedSpaces that should be included in snapshots have different, i.e.,
   // smaller, initial pages.
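
The new MoveOverFreeMemory() hook is the piece MergeCompactionSpace() is expected to build on. A minimal sketch of that relationship, assuming the definitions in src/heap/spaces.cc follow the comment above; this is not the actual implementation:

    // Sketch only; the real bodies live in src/heap/spaces.cc.
    void PagedSpace::MergeCompactionSpace(CompactionSpace* other) {
      // First take over whatever free-list memory the compaction task did not
      // use, so later allocations in this space can reuse it.
      MoveOverFreeMemory(other);
      // ... then take over {other}'s pages, remove its bump pointer area, and
      // reset its statistics, as described in the comment above ...
    }
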
@@ -2040,12 +2045,6 @@ class PagedSpace : public Space {
   // end_of_unswept_pages_ page.
   Page* end_of_unswept_pages_;
 
-  // Emergency memory is the memory of a full page for a given space, allocated
-  // conservatively before evacuating a page. If compaction fails due to out
-  // of memory error the emergency memory can be used to complete compaction.
-  // If not used, the emergency memory is released after compaction.
-  MemoryChunk* emergency_memory_;
-
   // Mutex guarding any concurrent access to the space.
   base::Mutex space_mutex_;
 
@@ -2744,6 +2743,32 @@ class CompactionSpace : public PagedSpace {
 };
 
 
+// A collection of |CompactionSpace|s used by a single compaction task.
+class CompactionSpaceCollection : public Malloced {
+ public:
+  explicit CompactionSpaceCollection(Heap* heap)
+      : old_space_(heap, OLD_SPACE, Executability::NOT_EXECUTABLE),
+        code_space_(heap, CODE_SPACE, Executability::EXECUTABLE) {}
+
+  CompactionSpace* Get(AllocationSpace space) {
+    switch (space) {
+      case OLD_SPACE:
+        return &old_space_;
+      case CODE_SPACE:
+        return &code_space_;
+      default:
+        UNREACHABLE();
+    }
+    UNREACHABLE();
+    return nullptr;
+  }
+
+ private:
+  CompactionSpace old_space_;
+  CompactionSpace code_space_;
+};
+
+
 // -----------------------------------------------------------------------------
 // Old object space (includes the old space of objects and code space)
 
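
How the collection is consumed is not visible in this header. A hedged usage sketch, where the per-task setup and the final merge step are illustrative assumptions rather than code from this patch ({heap} is the Heap* handed to the task):

    // Illustrative only: one CompactionSpaceCollection per compaction task.
    CompactionSpaceCollection* compaction_spaces =
        new CompactionSpaceCollection(heap);
    // ... the task evacuates its pages, allocating into
    // compaction_spaces->Get(OLD_SPACE) and compaction_spaces->Get(CODE_SPACE) ...
    // Afterwards the main thread folds the per-task spaces back into the heap
    // via the MergeCompactionSpace() hook declared earlier in this file.
    heap->old_space()->MergeCompactionSpace(compaction_spaces->Get(OLD_SPACE));
    heap->code_space()->MergeCompactionSpace(compaction_spaces->Get(CODE_SPACE));
    delete compaction_spaces;
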