Chromium Code Reviews

Unified Diff: src/heap/spaces.h

Issue 2796233003: [heap] Evacuation for young generation (Closed)
Patch Set: Fix recording Created 3 years, 8 months ago
Index: src/heap/spaces.h
diff --git a/src/heap/spaces.h b/src/heap/spaces.h
index ff27d09c3aac38b2eedf1bda874fa3d3fc1ebcde..40e20dd0f013961b10741d00debdce6347670377 100644
--- a/src/heap/spaces.h
+++ b/src/heap/spaces.h
@@ -524,6 +524,8 @@ class MemoryChunk {
!IsFlagSet(COMPACTION_WAS_ABORTED);
}
+ bool CanUseForAllocation() { return CanAllocate() && !NeverEvacuate(); }
+
Executability executable() {
return IsFlagSet(IS_EXECUTABLE) ? EXECUTABLE : NOT_EXECUTABLE;
}
@@ -670,9 +672,11 @@ class MarkingState {
MarkingState(Bitmap* bitmap, intptr_t* live_bytes)
: bitmap_(bitmap), live_bytes_(live_bytes) {}
- void IncrementLiveBytes(intptr_t by) const {
+ template <MarkBit::AccessMode mode = MarkBit::NON_ATOMIC>
+ inline void IncrementLiveBytes(intptr_t by) const {
*live_bytes_ += static_cast<int>(by);
}
+
void SetLiveBytes(intptr_t value) const {
*live_bytes_ = static_cast<int>(value);
}
@@ -690,6 +694,18 @@ class MarkingState {
intptr_t* live_bytes_;
};
+template <>
+inline void MarkingState::IncrementLiveBytes<MarkBit::NON_ATOMIC>(
+ intptr_t by) const {
+ *live_bytes_ += static_cast<int>(by);
+}
+
+template <>
+inline void MarkingState::IncrementLiveBytes<MarkBit::ATOMIC>(
+ intptr_t by) const {
+ reinterpret_cast<base::AtomicNumber<intptr_t>*>(live_bytes_)->Increment(by);
+}
+
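[Editorial note, not part of the patch] The template parameter added above lets callers choose the plain increment on the owning thread and the atomic increment from parallel marking tasks. The following is a minimal standalone sketch of the same pattern using std::atomic in place of base::AtomicNumber; the AccessMode enum and class names here are simplified stand-ins, not V8's definitions.

```cpp
#include <atomic>
#include <cstdint>

// Simplified stand-in for the access-mode tag used in the patch.
enum class AccessMode { NON_ATOMIC, ATOMIC };

class LiveBytesCounter {
 public:
  explicit LiveBytesCounter(intptr_t* live_bytes) : live_bytes_(live_bytes) {}

  // Primary template defaults to the cheap, single-threaded path.
  template <AccessMode mode = AccessMode::NON_ATOMIC>
  inline void IncrementLiveBytes(intptr_t by) const;

 private:
  intptr_t* live_bytes_;
};

// Non-atomic: plain read-modify-write, valid only on the owning thread.
template <>
inline void LiveBytesCounter::IncrementLiveBytes<AccessMode::NON_ATOMIC>(
    intptr_t by) const {
  *live_bytes_ += by;
}

// Atomic: safe when several marking tasks bump the same counter. The cast
// mirrors the reinterpret_cast used in the patch and assumes the atomic type
// has the same layout as the raw counter.
template <>
inline void LiveBytesCounter::IncrementLiveBytes<AccessMode::ATOMIC>(
    intptr_t by) const {
  reinterpret_cast<std::atomic<intptr_t>*>(live_bytes_)
      ->fetch_add(by, std::memory_order_relaxed);
}
```

A caller on a concurrent task would then write `counter.IncrementLiveBytes<AccessMode::ATOMIC>(size)`, while main-thread code keeps the default and pays no atomic cost.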
// -----------------------------------------------------------------------------
// A page is a memory chunk of a size 1MB. Large object pages may be larger.
//
@@ -1598,28 +1614,28 @@ class AllocationStats BASE_EMBEDDED {
// Zero out all the allocation statistics (i.e., no capacity).
void Clear() {
- capacity_ = 0;
- max_capacity_ = 0;
- size_ = 0;
+ capacity_.SetValue(0);
+ max_capacity_.SetValue(0);
+ size_.SetValue(0);
}
- void ClearSize() { size_ = capacity_; }
+ void ClearSize() { size_.SetValue(capacity_.Value()); }
// Accessors for the allocation statistics.
- size_t Capacity() { return capacity_; }
- size_t MaxCapacity() { return max_capacity_; }
- size_t Size() { return size_; }
+ size_t Capacity() { return capacity_.Value(); }
+ size_t MaxCapacity() { return max_capacity_.Value(); }
+ size_t Size() { return size_.Value(); }
// Grow the space by adding available bytes. They are initially marked as
// being in use (part of the size), but will normally be immediately freed,
// putting them on the free list and removing them from size_.
void ExpandSpace(size_t bytes) {
- DCHECK_GE(size_ + bytes, size_);
- DCHECK_GE(capacity_ + bytes, capacity_);
- capacity_ += bytes;
- size_ += bytes;
- if (capacity_ > max_capacity_) {
- max_capacity_ = capacity_;
+ DCHECK_GE(size_.Value() + bytes, size_.Value());
+ DCHECK_GE(capacity_.Value() + bytes, capacity_.Value());
+ capacity_.Increment(bytes);
Michael Lippautz 2017/04/21 07:05:52 This should be safe even though there are 2 counters
Hannes Payer (out of office) 2017/04/21 14:46:27 Acknowledged.
+ size_.Increment(bytes);
+ if (capacity_.Value() > max_capacity_.Value()) {
+ max_capacity_.SetValue(capacity_.Value());
}
}
@@ -1627,54 +1643,54 @@ class AllocationStats BASE_EMBEDDED {
// during sweeping, bytes have been marked as being in use (part of the size)
// and are hereby freed.
void ShrinkSpace(size_t bytes) {
- DCHECK_GE(capacity_, bytes);
- DCHECK_GE(size_, bytes);
- capacity_ -= bytes;
- size_ -= bytes;
+ DCHECK_GE(capacity_.Value(), bytes);
+ DCHECK_GE(size_.Value(), bytes);
+ capacity_.Decrement(bytes);
+ size_.Decrement(bytes);
}
void AllocateBytes(size_t bytes) {
- DCHECK_GE(size_ + bytes, size_);
- size_ += bytes;
+ DCHECK_GE(size_.Value() + bytes, size_.Value());
+ size_.Increment(bytes);
}
void DeallocateBytes(size_t bytes) {
- DCHECK_GE(size_, bytes);
- size_ -= bytes;
+ DCHECK_GE(size_.Value(), bytes);
+ size_.Decrement(bytes);
}
void DecreaseCapacity(size_t bytes) {
- DCHECK_GE(capacity_, bytes);
- DCHECK_GE(capacity_ - bytes, size_);
- capacity_ -= bytes;
+ DCHECK_GE(capacity_.Value(), bytes);
+ DCHECK_GE(capacity_.Value() - bytes, size_.Value());
+ capacity_.Decrement(bytes);
}
void IncreaseCapacity(size_t bytes) {
- DCHECK_GE(capacity_ + bytes, capacity_);
- capacity_ += bytes;
+ DCHECK_GE(capacity_.Value() + bytes, capacity_.Value());
+ capacity_.Increment(bytes);
}
// Merge |other| into |this|.
void Merge(const AllocationStats& other) {
- DCHECK_GE(capacity_ + other.capacity_, capacity_);
- DCHECK_GE(size_ + other.size_, size_);
- capacity_ += other.capacity_;
- size_ += other.size_;
- if (other.max_capacity_ > max_capacity_) {
- max_capacity_ = other.max_capacity_;
+ DCHECK_GE(capacity_.Value() + other.capacity_.Value(), capacity_.Value());
+ DCHECK_GE(size_.Value() + other.size_.Value(), size_.Value());
+ capacity_.Increment(other.capacity_.Value());
+ size_.Increment(other.size_.Value());
+ if (other.max_capacity_.Value() > max_capacity_.Value()) {
+ max_capacity_.SetValue(other.max_capacity_.Value());
}
}
private:
// |capacity_|: The number of object-area bytes (i.e., not including page
// bookkeeping structures) currently in the space.
- size_t capacity_;
+ base::AtomicNumber<size_t> capacity_;
// |max_capacity_|: The maximum capacity ever observed.
- size_t max_capacity_;
+ base::AtomicNumber<size_t> max_capacity_;
// |size_|: The number of allocated bytes.
- size_t size_;
+ base::AtomicNumber<size_t> size_;
};
// A free list maintaining free blocks of memory. The free list is organized in
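[Editorial note, not part of the patch] The AllocationStats counters above are converted from plain size_t to base::AtomicNumber&lt;size_t&gt;, so each Increment/Decrement/SetValue is individually atomic; as the inline review exchange notes, ExpandSpace still updates capacity_ and size_ in two separate steps. The sketch below shows roughly the interface AllocationStats relies on, reconstructed from its use in this file; the relaxed memory ordering is an assumption, not taken from V8's base/atomic-utils.h.

```cpp
#include <atomic>
#include <cstddef>

// Sketch of the AtomicNumber interface as used here (Value/SetValue/
// Increment/Decrement). Ordering semantics are assumed for illustration.
template <typename T>
class AtomicNumberSketch {
 public:
  T Value() const { return value_.load(std::memory_order_relaxed); }
  void SetValue(T new_value) {
    value_.store(new_value, std::memory_order_relaxed);
  }
  void Increment(T by) { value_.fetch_add(by, std::memory_order_relaxed); }
  void Decrement(T by) { value_.fetch_sub(by, std::memory_order_relaxed); }

 private:
  std::atomic<T> value_{0};
};

// Note: each counter is atomic on its own. A sequence like
//   capacity_.Increment(bytes);
//   size_.Increment(bytes);
// is still two independent updates, which is what the inline review comment
// above refers to: correctness relies on readers tolerating a momentarily
// inconsistent capacity_/size_ pair.
```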
@@ -1940,6 +1956,10 @@ class V8_EXPORT_PRIVATE PagedSpace : NON_EXPORTED_BASE(public Space) {
public:
typedef PageIterator iterator;
+ // Reuse a page for allocation only if it has at least {kPageReuseThreshold}
+ // memory available in its FreeList.
+ static const size_t kPageReuseThreshold = 4 * KB;
+
static const intptr_t kCompactionMemoryWanted = 500 * KB;
// Creates a space with an id.
@@ -2146,6 +2166,9 @@ class V8_EXPORT_PRIVATE PagedSpace : NON_EXPORTED_BASE(public Space) {
std::unique_ptr<ObjectIterator> GetObjectIterator() override;
+ Page* RemovePageSafe();
+ void AddPage(Page* page);
+
protected:
// PagedSpaces that should be included in snapshots have different, i.e.,
// smaller, initial pages.
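[Editorial note, not part of the patch] The new RemovePageSafe()/AddPage() pair, together with kPageReuseThreshold, suggests that one paged space can hand a page with enough free-list memory over to another allocator. The standalone sketch below illustrates that call pattern only; the stub types, the helper name, and the assumption that RemovePageSafe itself applies the threshold are all illustrative and not taken from the patch.

```cpp
#include <cstddef>
#include <list>

// Minimal stand-ins for Page and PagedSpace, for illustration only.
struct StubPage {
  size_t available_in_free_list = 0;
};

class StubPagedSpace {
 public:
  // Mirrors kPageReuseThreshold from this hunk: only pages with at least this
  // much free-list memory are considered worth reusing for allocation.
  static constexpr size_t kPageReuseThreshold = 4 * 1024;

  // Detach a page that meets the reuse threshold, or return nullptr.
  StubPage* RemovePageSafe() {
    for (auto it = pages_.begin(); it != pages_.end(); ++it) {
      if ((*it)->available_in_free_list >= kPageReuseThreshold) {
        StubPage* page = *it;
        pages_.erase(it);
        return page;
      }
    }
    return nullptr;
  }

  void AddPage(StubPage* page) { pages_.push_back(page); }

 private:
  std::list<StubPage*> pages_;
};

// Hypothetical call pattern: move one reusable page from |from| to |to|.
inline bool MoveReusablePage(StubPagedSpace* from, StubPagedSpace* to) {
  StubPage* page = from->RemovePageSafe();
  if (page == nullptr) return false;
  to->AddPage(page);
  return true;
}
```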