Chromium Code Reviews

Unified Diff: src/heap/spaces.h

Issue 1347873003: Revert of [heap] Introduce parallel compaction algorithm. (Closed) Base URL: https://chromium.googlesource.com/v8/v8.git@counters-2nd-try
Patch Set: Created 5 years, 3 months ago
Index: src/heap/spaces.h
diff --git a/src/heap/spaces.h b/src/heap/spaces.h
index 3c3157ec432241a3cb9c1b92dfa0f35db9d63c63..9a91abb537145b31a128c2a89c703244f1eca360 100644
--- a/src/heap/spaces.h
+++ b/src/heap/spaces.h
@@ -268,19 +268,6 @@
// any heap object.
class MemoryChunk {
public:
- // |kCompactionDone|: Initial compaction state of a |MemoryChunk|.
- // |kCompactingInProgress|: Parallel compaction is currently in progress.
- // |kCompactingFinalize|: Parallel compaction is done but the chunk needs to
- // be finalized.
- // |kCompactingAborted|: Parallel compaction has been aborted, which should
- // for now only happen in OOM scenarios.
- enum ParallelCompactingState {
- kCompactingDone,
- kCompactingInProgress,
- kCompactingFinalize,
- kCompactingAborted,
- };
-
// Only works if the pointer is in the first kPageSize of the MemoryChunk.
static MemoryChunk* FromAddress(Address a) {
return reinterpret_cast<MemoryChunk*>(OffsetFrom(a) & ~kAlignmentMask);
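The hunk above removes the per-chunk parallel compaction state machine that the reverted CL had introduced on MemoryChunk. As background for reviewers, a minimal self-contained sketch of how such a state machine is typically driven, using std::atomic rather than V8's AtomicValue; the struct and method names below are invented for illustration and are not part of this patch:

#include <atomic>

// Sketch only: models the removed ParallelCompactingState transitions.
enum ParallelCompactingState {
  kCompactingDone,        // initial state of a chunk
  kCompactingInProgress,  // a compaction task has claimed the chunk
  kCompactingFinalize,    // compaction finished, chunk awaits finalization
  kCompactingAborted,     // compaction gave up, e.g. in an OOM scenario
};

struct ChunkStateSketch {
  std::atomic<ParallelCompactingState> state{kCompactingDone};

  // Only one task can win the kCompactingDone -> kCompactingInProgress CAS.
  bool TryStartCompacting() {
    ParallelCompactingState expected = kCompactingDone;
    return state.compare_exchange_strong(expected, kCompactingInProgress);
  }

  // Record the outcome once the task is done with the chunk.
  void MarkFinalizePending() { state.store(kCompactingFinalize); }
  void MarkAborted() { state.store(kCompactingAborted); }
};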
@@ -471,10 +458,6 @@
base::Release_Store(&parallel_sweeping_, state);
}
- AtomicValue<ParallelCompactingState>& parallel_compaction_state() {
- return parallel_compaction_;
- }
-
bool TryLock() { return mutex_->TryLock(); }
base::Mutex* mutex() { return mutex_; }
@@ -583,7 +566,6 @@
+ kPointerSize // AtomicValue high_water_mark_
+ kPointerSize // base::Mutex* mutex_
+ kPointerSize // base::AtomicWord parallel_sweeping_
- + kPointerSize // AtomicValue parallel_compaction_
+ 5 * kPointerSize // AtomicNumber free-list statistics
+ kPointerSize // base::AtomicWord next_chunk_
+ kPointerSize; // base::AtomicWord prev_chunk_
@@ -744,7 +726,6 @@
base::Mutex* mutex_;
base::AtomicWord parallel_sweeping_;
- AtomicValue<ParallelCompactingState> parallel_compaction_;
// PagedSpace free-list statistics.
AtomicNumber<intptr_t> available_in_small_free_list_;
@@ -1004,6 +985,9 @@
bool CommitRawMemory(Address start, size_t length);
bool UncommitRawMemory(Address start, size_t length);
void FreeRawMemory(Address buf, size_t length);
+
+ void ReserveEmergencyBlock();
+ void ReleaseEmergencyBlock();
private:
// Frees the range of virtual memory, and frees the data structures used to
@@ -1047,6 +1031,12 @@
List<FreeBlock> allocation_list_;
int current_allocation_block_index_;
+ // Emergency block guarantees that we can always allocate a page for
+ // evacuation candidates when code space is compacted. Emergency block is
+  // reserved immediately after GC and is released immediately before
+ // allocating a page for evacuation.
+ FreeBlock emergency_block_;
+
// Finds a block on the allocation list that contains at least the
// requested amount of memory. If none is found, sorts and merges
// the existing free memory blocks, and searches again.
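The two hunks above restore the CodeRange emergency block that the reverted CL had dropped: one block is parked right after GC and handed back to the allocation list just before the evacuation page for compacted code space is allocated. A self-contained sketch of that reserve/release idea (not V8 code; all helper names are invented):

#include <cassert>
#include <cstddef>

// Sketch: keep one block out of the free list after GC so a code-space
// evacuation can always allocate a page later.
struct FreeBlockSketch {
  void* start = nullptr;
  std::size_t size = 0;
  bool empty() const { return size == 0; }
};

class CodeRangeSketch {
 public:
  // Right after GC: carve a block out of the free memory and park it so
  // ordinary allocation cannot consume it.
  void ReserveEmergencyBlock(std::size_t block_size) {
    assert(emergency_block_.empty());
    emergency_block_ = TakeFreeBlock(block_size);
  }

  // Immediately before allocating the evacuation page: hand the parked block
  // back to the free list so that allocation is guaranteed to succeed.
  void ReleaseEmergencyBlock() {
    if (!emergency_block_.empty()) {
      ReturnFreeBlock(emergency_block_);
      emergency_block_ = FreeBlockSketch();
    }
  }

 private:
  // Stand-ins for the real allocation-list bookkeeping.
  FreeBlockSketch TakeFreeBlock(std::size_t size) {
    FreeBlockSketch block;
    block.size = size;  // pretend we carved |size| bytes off the free list
    return block;
  }
  void ReturnFreeBlock(const FreeBlockSketch&) {}

  FreeBlockSketch emergency_block_;
};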
@@ -1979,11 +1969,16 @@
// Return size of allocatable area on a page in this space.
inline int AreaSize() { return area_size_; }
+ void CreateEmergencyMemory();
+ void FreeEmergencyMemory();
+ void UseEmergencyMemory();
+ intptr_t MaxEmergencyMemoryAllocated();
+
+ bool HasEmergencyMemory() { return emergency_memory_ != NULL; }
+
// Merges {other} into the current space. Note that this modifies {other},
// e.g., removes its bump pointer area and resets statistics.
void MergeCompactionSpace(CompactionSpace* other);
-
- void MoveOverFreeMemory(PagedSpace* other);
protected:
// PagedSpaces that should be included in snapshots have different, i.e.,
@@ -2044,6 +2039,12 @@
// and sweep these pages concurrently. They will stop sweeping after the
// end_of_unswept_pages_ page.
Page* end_of_unswept_pages_;
+
+ // Emergency memory is the memory of a full page for a given space, allocated
+ // conservatively before evacuating a page. If compaction fails due to out
+ // of memory error the emergency memory can be used to complete compaction.
+ // If not used, the emergency memory is released after compaction.
+ MemoryChunk* emergency_memory_;
// Mutex guarding any concurrent access to the space.
base::Mutex space_mutex_;
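Similarly, the hunks above bring back PagedSpace's emergency memory: a full page allocated conservatively before evacuating a page, spent only if compaction runs out of memory and released otherwise. A self-contained model of that lifecycle (only the method names match the declarations restored above; everything else is invented):

// Sketch, not V8 code.
class PagedSpaceSketch {
 public:
  void CreateEmergencyMemory() { emergency_memory_ = true; }   // before evacuating a page
  bool HasEmergencyMemory() const { return emergency_memory_; }
  void UseEmergencyMemory() { emergency_memory_ = false; }     // OOM fallback path
  void FreeEmergencyMemory() { emergency_memory_ = false; }    // normal path after compaction

 private:
  bool emergency_memory_ = false;  // stands in for MemoryChunk* emergency_memory_
};

// Intended ordering, per the restored comment: reserve before evacuating,
// spend the reserve only if compaction hits out-of-memory, release otherwise.
inline void EvacuateWithFallbackSketch(PagedSpaceSketch* space, bool hit_oom) {
  space->CreateEmergencyMemory();
  if (hit_oom && space->HasEmergencyMemory()) {
    space->UseEmergencyMemory();
  } else if (space->HasEmergencyMemory()) {
    space->FreeEmergencyMemory();
  }
}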
@@ -2743,32 +2744,6 @@
};
-// A collection of |CompactionSpace|s used by a single compaction task.
-class CompactionSpaceCollection : public Malloced {
- public:
- explicit CompactionSpaceCollection(Heap* heap)
- : old_space_(heap, OLD_SPACE, Executability::NOT_EXECUTABLE),
- code_space_(heap, CODE_SPACE, Executability::EXECUTABLE) {}
-
- CompactionSpace* Get(AllocationSpace space) {
- switch (space) {
- case OLD_SPACE:
- return &old_space_;
- case CODE_SPACE:
- return &code_space_;
- default:
- UNREACHABLE();
- }
- UNREACHABLE();
- return nullptr;
- }
-
- private:
- CompactionSpace old_space_;
- CompactionSpace code_space_;
-};
-
-
// -----------------------------------------------------------------------------
// Old object space (includes the old space of objects and code space)
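For reference, the CompactionSpaceCollection removed above bundled one task-local CompactionSpace per compacted space so parallel compaction tasks could allocate without contending on the shared spaces. A sketch of how a task might have used it, assuming V8's heap headers are available; CompactionTaskSketch itself is illustrative and not code from this CL:

// Illustrative only; types and AllocationSpace values come from the removed
// class, the surrounding function is invented.
void CompactionTaskSketch(Heap* heap) {
  CompactionSpaceCollection compaction_spaces(heap);
  // Evacuate old-space objects into the task-local old space and code objects
  // into the task-local code space, avoiding locks on the shared spaces.
  CompactionSpace* old_target = compaction_spaces.Get(OLD_SPACE);
  CompactionSpace* code_target = compaction_spaces.Get(CODE_SPACE);
  // ... allocate into old_target / code_target while evacuating pages ...
  (void)old_target;
  (void)code_target;
}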