Index: src/zone.cc
diff --git a/src/zone.cc b/src/zone.cc
index a10b63612e484962870ab2004c6bf2a7ff44f49a..7d7135555a05063c7432aedc99260a37e38cdb6c 100644
--- a/src/zone.cc
+++ b/src/zone.cc
@@ -5,6 +5,8 @@
 #include "src/zone.h"
 #include <cstring>
+#include "include/v8-platform.h" |
+#include "src/base/platform/time.h" |
#include "src/v8.h" |
@@ -41,37 +43,251 @@ const size_t kASanRedzoneBytes = 0;
 }  // namespace
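+// Process start timestamp, used only by the (commented-out) segment tracing
+// PrintF calls below.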
+clock_t begin = clock();
 // Segments represent chunks of memory: They have starting address
-// (encoded in the this pointer) and a size in bytes. Segments are
+// (encoded in the this pointer) and a VirtualMemory instance. Segments are
 // chained together forming a LIFO structure with the newest segment
-// available as segment_head_. Segments are allocated using malloc()
-// and de-allocated using free().
+// available as segment_head_. Segments are allocated as aligned chunks of
+// virtual memory and released through their VirtualMemory instance.
 class Segment {
  public:
-  void Initialize(Segment* next, size_t size) {
-    next_ = next;
+  void Initialize(Zone* zone, v8::base::VirtualMemory* virtual_memory,
+                  size_t size) {
+    DCHECK_EQ(reinterpret_cast<uintptr_t>(this) & Zone::kSegmentAlignmentMask,
+              reinterpret_cast<uintptr_t>(this));
+
+    next_ = nullptr;
+    zone_ = zone;
+    virtual_memory_.Reset();
+    virtual_memory_.TakeControl(virtual_memory);
     size_ = size;
   }
+  void set_zone(Zone* zone) { zone_ = zone; }
+
+  Zone* zone() const { return zone_; }
   Segment* next() const { return next_; }
-  void clear_next() { next_ = nullptr; }
+  void set_next(Segment* const value) { next_ = value; }
   size_t size() const { return size_; }
-  size_t capacity() const { return size_ - sizeof(Segment); }
+
+  size_t capacity() const { return size() - sizeof(Segment); }
   Address start() const { return address(sizeof(Segment)); }
-  Address end() const { return address(size_); }
+  Address end() const { return address(size()); }
+
+  bool is_big_object_segment() const {
+    return size() > Zone::kMaximumSegmentSize;
+  }
+
+  void Release() {
+// PrintF("%f; -%lu;0\n", static_cast<double>(clock() - begin) / CLOCKS_PER_SEC,
+//        size_);
+#ifdef ENABLE_HANDLE_ZAPPING
+    // We are going to zap the memory the segment is stored in, so we
+    // need to save the virtual memory information to be able to release
+    // it.
+    v8::base::VirtualMemory vm = v8::base::VirtualMemory();
+    vm.TakeControl(&virtual_memory_);
+    // Un-poison first so the zapping doesn't trigger ASan complaints.
+    ASAN_UNPOISON_MEMORY_REGION(this, size_);
+    // Zap the entire current segment (including the header).
+    memset(this, kZapDeadByte, size_);
+
+    vm.Release();
+#else
+    virtual_memory_.Release();
+#endif
+  }
+
+  void Reset() {
+    // Un-poison so that neither the zapping nor the reuse triggers ASan
+    // complaints.
+    ASAN_UNPOISON_MEMORY_REGION(virtual_memory_.address(),
+                                 virtual_memory_.size());
+#ifdef ENABLE_HANDLE_ZAPPING
+    // Zap the entire current segment (excluding the header).
+    memset(reinterpret_cast<void*>(start()), kZapDeadByte, capacity());
+#endif
+    next_ = nullptr;
+  }
  private:
+#ifdef ENABLE_HANDLE_ZAPPING
+  // Constant byte value used for zapping dead memory in debug mode.
+  static const unsigned char kZapDeadByte = 0xcd;
+#endif
+
   // Computes the address of the nth byte in this segment.
   Address address(size_t n) const { return Address(this) + n; }
+  Zone* zone_;
   Segment* next_;
+  v8::base::VirtualMemory virtual_memory_;
+
   size_t size_;
+
+  DISALLOW_COPY_AND_ASSIGN(Segment);
 };
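+// SegmentPool keeps a process-wide cache of unused segments so that zones
+// can reuse memory chunks instead of reserving and releasing virtual memory
+// for every segment. Segments that do not fit into the pool are pushed onto
+// a garbage stack and released on a background thread.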
+namespace SegmentPool {
+namespace {
+static const uint8_t kMinSegmentSizePower = 13;
+static const uint8_t kMaxSegmentSizePower = 17;
+
+static const uint8_t kMaxSegmentsPerBucket = 15;
+
+STATIC_ASSERT(kMinSegmentSizePower <= kMaxSegmentSizePower);
+
+static Segment* garbage_segment_stack_head_ = nullptr;
+
+static size_t garbage_segment_stack_size_ = 0;
+
+static v8::base::Mutex* garbage_segments_mutex_ = new base::Mutex();
+
+static Segment** unused_segments_heads_ =
+    new Segment*[1 + kMaxSegmentSizePower - kMinSegmentSizePower];
+
+static size_t* unused_segments_sizes =
+    new size_t[1 + kMaxSegmentSizePower - kMinSegmentSizePower];
+
+static size_t unused_segments_size_ = 0;
+
+static v8::base::Mutex* unused_segments_mutex_ = new base::Mutex();
+
+static v8::base::Semaphore* cleanup_semaphore = new base::Semaphore(1);
+
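+// Removes and returns the most recently pushed garbage segment, or nullptr
+// if the garbage stack is empty.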
+static Segment* PopSegmentFromGarbageStack() {
+  garbage_segments_mutex_->Lock();
+  auto result = garbage_segment_stack_head_;
+
+  if (result) {
+    garbage_segment_stack_head_ = result->next();
+    garbage_segment_stack_size_ -= result->size();
+  }
+
+  garbage_segments_mutex_->Unlock();
+
+  return result;
+}
+
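+// Background task that releases all segments currently on the garbage stack.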
+class SegmentReleaser : public Task {
+ public:
+  void Run() override {
+    ReleaseGarbage();
+    cleanup_semaphore->Signal();
+  }
+
+ private:
+  static void ReleaseGarbage() {
+    while (true) {
+      Segment* segment = PopSegmentFromGarbageStack();
+
+      if (segment == nullptr) break;
+
+      segment->Release();
+    }
+  }
+};
+
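+// Schedules a SegmentReleaser task unless one is already pending; the
+// semaphore ensures that at most one cleanup task is in flight at a time.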
+static void SignalGC() {
+  if (cleanup_semaphore->WaitFor(base::TimeDelta::FromSeconds(0))) {
+    V8::GetCurrentPlatform()->CallOnBackgroundThread(
+        new SegmentReleaser(), Platform::kShortRunningTask);
+  }
+}
+}  // namespace
+
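+// Adds a segment to the garbage stack and triggers a background cleanup once
+// the stack holds more than 1 MB of segment memory.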
+static void PushSegmentToGarbageStack(Segment* segment) {
+  garbage_segments_mutex_->Lock();
+  segment->set_next(garbage_segment_stack_head_);
+  garbage_segment_stack_head_ = segment;
+  garbage_segment_stack_size_ += segment->size();
+
+  if (garbage_segment_stack_size_ > 1 << 20) {
+    SignalGC();
+  }
+
+  garbage_segments_mutex_->Unlock();
+}
+
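+// Returns a pooled segment that is at least |requested_size| bytes large, or
+// nullptr if the request is too large for the pool or the matching bucket is
+// empty.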
+static Segment* GetSegmentFromPool(size_t requested_size) {
+  if (requested_size > 1 << kMaxSegmentSizePower) {
+    return nullptr;
+  }
+
+  uint8_t power = kMinSegmentSizePower;
+
+  while (requested_size > 1 << power) power++;
+
+  power -= kMinSegmentSizePower;
+
+  DCHECK_GE(power, 0);
+
+  unused_segments_mutex_->Lock();
+
+  Segment* segment = unused_segments_heads_[power];
+
+  if (segment) {
+    unused_segments_heads_[power] = segment->next();
+    segment->set_next(nullptr);
+
+    unused_segments_sizes[power]--;
+    unused_segments_size_ -= segment->size();
+  }
+
+  unused_segments_mutex_->Unlock();
+
+  if (segment) {
+    DCHECK_GE(segment->size(), requested_size);
+    // PrintF("%f; 0;-%lu\n", static_cast<double>(clock() - begin) /
+    //        CLOCKS_PER_SEC, segment->size());
+  }
+  return segment;
+}
+
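+// Tries to add a segment to the pool bucket matching its size. Returns false
+// if the segment is too small or too large for the pool, or if the bucket is
+// already full.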
+static bool AddSegmentToPool(Segment* segment) {
+  size_t size = segment->size();
+
+  if (size >= (1 << (kMaxSegmentSizePower + 1))) {
+    return false;
+  }
+
+  if (size < (1 << kMinSegmentSizePower)) {
+    return false;
+  }
+
+  uint8_t power = kMaxSegmentSizePower;
+
+  while (size < 1 << power) power--;
+
+  power -= kMinSegmentSizePower;
+
+  DCHECK_GE(power, 0);
+
+  unused_segments_mutex_->Lock();
+
+  if (unused_segments_sizes[power] >= kMaxSegmentsPerBucket) {
+    unused_segments_mutex_->Unlock();
+    return false;
+  }
+
+  segment->set_next(unused_segments_heads_[power]);
+  unused_segments_heads_[power] = segment;
+  unused_segments_size_ += size;
+  unused_segments_sizes[power]++;
+
+  unused_segments_mutex_->Unlock();
+
+  // PrintF("%f; 0;+%lu\n", static_cast<double>(clock() - begin) /
+  //        CLOCKS_PER_SEC, size);
+
+  return true;
+}
+}  // namespace SegmentPool
+
 Zone::Zone(base::AccountingAllocator* allocator)
     : allocation_size_(0),
       segment_bytes_allocated_(0),
@@ -87,6 +303,14 @@ Zone::~Zone() {
   DCHECK(segment_bytes_allocated_ == 0);
 }
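+// Segments start at kSegmentAlignmentSize-aligned addresses, so the segment
+// header (and with it the owning zone) can be recovered from a pointer by
+// masking off the low address bits.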
+Segment* Zone::GetZoneSegmentFromPointer(const void* ptr) {
+  return reinterpret_cast<Segment*>(reinterpret_cast<uintptr_t>(ptr) &
+                                    kSegmentAlignmentMask);
+}
+
+Zone* Zone::GetZoneFromPointer(const void* ptr) {
+  return GetZoneSegmentFromPointer(ptr)->zone();
+}
 void* Zone::New(size_t size) {
   // Round up the requested size to fit the alignment.
@@ -103,33 +327,52 @@ void* Zone::New(size_t size) {
   // Check if the requested size is available without expanding.
   Address result = position_;
+  // In case the requested size is zero, we still want to return a pointer
+  // to a valid segment, so that the owning zone can be recovered from it.
+  if (size == 0) {
+    // There has to be a normal segment to reference.
+    if (segment_head_ == nullptr || segment_head_->is_big_object_segment()) {
+      // We create a segment of minimal size.
+      result = NewNormalSegment(kAlignment);
+    }
+
+    DCHECK(!GetZoneSegmentFromPointer(result)->is_big_object_segment());
+    DCHECK_EQ(GetZoneFromPointer(result), this);
+    return reinterpret_cast<void*>(result);
+  }
+
+  // Large objects are a special case and get their own segment to live in.
+  if (CalculateSegmentSize(size) > kMaximumSegmentSize) {
+    result = NewLargeObjectSegment(size);
+    DCHECK(GetZoneSegmentFromPointer(result)->is_big_object_segment());
+    allocation_size_ += size;
+    return reinterpret_cast<void*>(result);
+  }
+
   const size_t size_with_redzone = size + kASanRedzoneBytes;
   const uintptr_t limit = reinterpret_cast<uintptr_t>(limit_);
   const uintptr_t position = reinterpret_cast<uintptr_t>(position_);
   // position_ > limit_ can be true after the alignment correction above.
   if (limit < position || size_with_redzone > limit - position) {
-    result = NewExpand(size_with_redzone);
+    result = NewNormalSegment(size_with_redzone);
   } else {
     position_ += size_with_redzone;
   }
   Address redzone_position = result + size;
-  DCHECK(redzone_position + kASanRedzoneBytes == position_);
+  DCHECK_EQ(redzone_position + kASanRedzoneBytes, position_);
   ASAN_POISON_MEMORY_REGION(redzone_position, kASanRedzoneBytes);
   // Check that the result has the proper alignment and return it.
   DCHECK(IsAddressAligned(result, kAlignment, 0));
+  DCHECK(!GetZoneSegmentFromPointer(result)->is_big_object_segment());
+  DCHECK_EQ(GetZoneFromPointer(result), this);
   allocation_size_ += size;
   return reinterpret_cast<void*>(result);
 }
 void Zone::DeleteAll() {
-#ifdef DEBUG
-  // Constant byte value used for zapping dead memory in debug mode.
-  static const unsigned char kZapDeadByte = 0xcd;
-#endif
-
   // Find a segment with a suitable size to keep around.
   Segment* keep = nullptr;
   // Traverse the chained list of segments, zapping (in debug mode)
@@ -139,16 +382,15 @@ void Zone::DeleteAll() {
     if (!keep && current->size() <= kMaximumKeptSegmentSize) {
       // Unlink the segment we wish to keep from the list.
       keep = current;
-      keep->clear_next();
+      keep->Reset();
     } else {
-      size_t size = current->size();
-#ifdef DEBUG
-      // Un-poison first so the zapping doesn't trigger ASan complaints.
-      ASAN_UNPOISON_MEMORY_REGION(current, size);
-      // Zap the entire current segment (including the header).
-      memset(current, kZapDeadByte, size);
-#endif
-      DeleteSegment(current, size);
+      segment_bytes_allocated_ -= current->size();
+      allocator_->ChangeCurrentMemoryUsage(
+          -static_cast<int64_t>(current->size()));
+
+      if (!SegmentPool::AddSegmentToPool(current)) {
+        SegmentPool::PushSegmentToGarbageStack(current);
+      }
     }
     current = next;
   }
@@ -161,12 +403,6 @@ void Zone::DeleteAll() {
     Address start = keep->start();
     position_ = RoundUp(start, kAlignment);
    limit_ = keep->end();
-    // Un-poison so we can re-use the segment later.
-    ASAN_UNPOISON_MEMORY_REGION(start, keep->capacity());
-#ifdef DEBUG
-    // Zap the contents of the kept segment (but not the header).
-    memset(start, kZapDeadByte, keep->capacity());
-#endif
   } else {
     position_ = limit_ = 0;
   }
@@ -178,49 +414,97 @@ void Zone::DeleteAll() {
 void Zone::DeleteKeptSegment() {
-#ifdef DEBUG
-  // Constant byte value used for zapping dead memory in debug mode.
-  static const unsigned char kZapDeadByte = 0xcd;
-#endif
-
   DCHECK(segment_head_ == nullptr || segment_head_->next() == nullptr);
   if (segment_head_ != nullptr) {
-    size_t size = segment_head_->size();
-#ifdef DEBUG
-    // Un-poison first so the zapping doesn't trigger ASan complaints.
-    ASAN_UNPOISON_MEMORY_REGION(segment_head_, size);
-    // Zap the entire kept segment (including the header).
-    memset(segment_head_, kZapDeadByte, size);
-#endif
-    DeleteSegment(segment_head_, size);
-    segment_head_ = nullptr;
+    segment_bytes_allocated_ -= segment_head_->size();
+    allocator_->ChangeCurrentMemoryUsage(
+        -static_cast<int64_t>(segment_head_->size()));
+    if (!SegmentPool::AddSegmentToPool(segment_head_)) {
+      SegmentPool::PushSegmentToGarbageStack(segment_head_);
+    }
   }
   DCHECK(segment_bytes_allocated_ == 0);
 }
-// Creates a new segment, sets it size, and pushes it to the front
-// of the segment chain. Returns the new segment.
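+// Creates a new segment of at least the given size, taking it from the
+// segment pool if possible and reserving aligned virtual memory otherwise.
+// The caller is responsible for linking it into the segment chain.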
 Segment* Zone::NewSegment(size_t size) {
-  Segment* result = reinterpret_cast<Segment*>(allocator_->Allocate(size));
-  segment_bytes_allocated_ += size;
-  if (result != nullptr) {
-    result->Initialize(segment_head_, size);
-    segment_head_ = result;
+  Segment* result = SegmentPool::GetSegmentFromPool(size);
+
+  if (!result) {
+    v8::base::VirtualMemory vm(size, kSegmentAlignmentSize);
+
+    if (!vm.IsReserved()) {
+      V8::FatalProcessOutOfMemory("Zone");
+      return nullptr;
+    }
+
+    // PrintF("%f; +%lu;0\n", static_cast<double>(clock() - begin) /
+    //        CLOCKS_PER_SEC, size);
+
+    Address base = Address(reinterpret_cast<uintptr_t>(vm.address()) &
+                           kSegmentAlignmentMask);
+
+    // On Windows, VirtualMemory can fail to allocate aligned memory.
+    if (base != vm.address()) {
+      // Address is not aligned.
+      base += kSegmentAlignmentSize;
+    }
+
+    // The address of the end of the reserved virtual memory.
+    Address end =
+        Address(reinterpret_cast<uintptr_t>(vm.address()) + vm.size());
+
+    // Check whether the virtual memory is big enough to fit our aligned chunk.
+    DCHECK_LE(base + size, end);
+
+    // In case the reserved virtual memory is larger than needed, we want to
+    // use as much of it as possible. For normal segments, the segment
+    // alignment size is the upper limit.
+    if (size <= kSegmentAlignmentSize) {
+      size = Min(static_cast<size_t>(end - base), kSegmentAlignmentSize);
+    }
+
+    if (!v8::base::VirtualMemory::CommitRegion(reinterpret_cast<void*>(base),
+                                               size, false)) {
+      V8::FatalProcessOutOfMemory("Zone");
+      return nullptr;
+    }
+
+    result = reinterpret_cast<Segment*>(base);
+    result->Initialize(this, &vm, size);
+  } else {
+    result->set_zone(this);
   }
+
+  segment_bytes_allocated_ += result->size();
+  allocator_->ChangeCurrentMemoryUsage(result->size());
+
   return result;
 }
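+// Allocates a dedicated segment for an object that is too large for a normal
+// segment. The segment is linked in behind the current head segment when one
+// exists, so that the head stays a normal segment. Returns the aligned start
+// of the object.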
+Address Zone::NewLargeObjectSegment(size_t size) {
+  size_t new_size = CalculateSegmentSize(size);
+  Segment* segment = NewSegment(new_size);
-// Deletes the given segment. Does not touch the segment chain.
-void Zone::DeleteSegment(Segment* segment, size_t size) {
-  segment_bytes_allocated_ -= size;
-  allocator_->Free(segment, size);
-}
+  if (segment_head_ == nullptr) {
+    // This is the only case in which a large object segment becomes head of
+    // the segment list.
+    segment_head_ = segment;
+  } else {
+    // Large object segments should be inserted second into the list when
+    // possible.
+    segment->set_next(segment_head_->next());
+    segment_head_->set_next(segment);
+  }
+  Address result = RoundUp(segment->start(), kAlignment);
+  DCHECK_EQ(GetZoneFromPointer(segment), this);
+  DCHECK_EQ(GetZoneFromPointer(result), this);
+  return result;
+}
-Address Zone::NewExpand(size_t size) {
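+// Creates a normal segment, puts it in front of the segment chain, and
+// recomputes position_ and limit_ so that the requested size fits into it.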
+Address Zone::NewNormalSegment(size_t size) {
   // Make sure the requested size is already properly aligned and that
   // there isn't enough room in the Zone to satisfy the request.
   DCHECK_EQ(size, RoundDown(size, kAlignment));
@@ -229,39 +513,24 @@ Address Zone::NewExpand(size_t size) {
                  reinterpret_cast<uintptr_t>(position_) <
              size);
-  // Compute the new segment size. We use a 'high water mark'
-  // strategy, where we increase the segment size every time we expand
-  // except that we employ a maximum segment size when we delete. This
-  // is to avoid excessive malloc() and free() overhead.
-  Segment* head = segment_head_;
-  const size_t old_size = (head == nullptr) ? 0 : head->size();
-  static const size_t kSegmentOverhead = sizeof(Segment) + kAlignment;
-  const size_t new_size_no_overhead = size + (old_size << 1);
-  size_t new_size = kSegmentOverhead + new_size_no_overhead;
-  const size_t min_new_size = kSegmentOverhead + size;
-  // Guard against integer overflow.
-  if (new_size_no_overhead < size || new_size < kSegmentOverhead) {
-    V8::FatalProcessOutOfMemory("Zone");
-    return nullptr;
-  }
-  if (new_size < kMinimumSegmentSize) {
-    new_size = kMinimumSegmentSize;
-  } else if (new_size > kMaximumSegmentSize) {
-    // Limit the size of new segments to avoid growing the segment size
-    // exponentially, thus putting pressure on contiguous virtual address space.
-    // All the while making sure to allocate a segment large enough to hold the
-    // requested size.
-    new_size = Max(min_new_size, kMaximumSegmentSize);
-  }
-  if (new_size > INT_MAX) {
-    V8::FatalProcessOutOfMemory("Zone");
-    return nullptr;
-  }
+  DCHECK_LE(size, kMaximumSegmentSize + 0);
+
+  size_t new_size = CalculateSegmentSize(size);
+  const size_t old_size =
+      (segment_head_ == nullptr) ? 0 : segment_head_->size();
+  new_size = Max(new_size, old_size << 1);
+  new_size = Min(new_size, kMaximumSegmentSize);
+
+  DCHECK_LE(new_size, kMaximumSegmentSize + 0);
+
   Segment* segment = NewSegment(new_size);
-  if (segment == nullptr) {
-    V8::FatalProcessOutOfMemory("Zone");
-    return nullptr;
-  }
+
+  // Put segment in front of the segment list.
+  segment->set_next(segment_head_);
+  segment_head_ = segment;
+
+  // Normal segments must not be bigger than the alignment size.
+  DCHECK_LE(segment->size(), kSegmentAlignmentSize + 0);
   // Recompute 'top' and 'limit' based on the new segment.
   Address result = RoundUp(segment->start(), kAlignment);
@@ -269,12 +538,23 @@ Address Zone::NewExpand(size_t size) {
   // Check for address overflow.
   // (Should not happen since the segment is guaranteed to accomodate
   // size bytes + header and alignment padding)
-  DCHECK(reinterpret_cast<uintptr_t>(position_) >=
-         reinterpret_cast<uintptr_t>(result));
+  DCHECK_GE(reinterpret_cast<uintptr_t>(position_),
+            reinterpret_cast<uintptr_t>(result));
+  DCHECK_EQ(GetZoneFromPointer(segment), this);
+  DCHECK_EQ(GetZoneFromPointer(result), this);
+  DCHECK_EQ(GetZoneFromPointer(segment->end() - 1), this);
   limit_ = segment->end();
   DCHECK(position_ <= limit_);
   return result;
 }
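+// Returns the smallest multiple of kMinimumSegmentSize that can hold
+// |requested| bytes plus the segment header and alignment padding.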
+size_t Zone::CalculateSegmentSize(const size_t requested) {
+  if (UINTPTR_MAX - (sizeof(Segment) + kAlignment) < requested) {
+    V8::FatalProcessOutOfMemory("Zone");
+  }
+
+  return RoundUp(requested + sizeof(Segment) + kAlignment, kMinimumSegmentSize);
+}
+
 }  // namespace internal
 }  // namespace v8