| Index: src/zone.cc | 
| diff --git a/src/zone.cc b/src/zone.cc | 
| index a10b63612e484962870ab2004c6bf2a7ff44f49a..3f209ea21f4c6a768b3b7c34b36e18465a73351d 100644 | 
| --- a/src/zone.cc | 
| +++ b/src/zone.cc | 
| @@ -43,33 +43,80 @@ const size_t kASanRedzoneBytes = 0; | 
|  | 
|  | 
| // Segments represent chunks of memory: They have starting address | 
| -// (encoded in the this pointer) and a size in bytes. Segments are | 
| +// (encoded in the this pointer) and a VirtualMemory instance. Segments are | 
| // chained together forming a LIFO structure with the newest segment | 
| -// available as segment_head_. Segments are allocated using malloc() | 
| -// and de-allocated using free(). | 
| +// available as segment_head_. Segments are allocated at aligned | 
| +// addresses via their VirtualMemory instance and released through it. | 
|  | 
| class Segment { | 
| public: | 
| -  void Initialize(Segment* next, size_t size) { | 
| -    next_ = next; | 
| -    size_ = size; | 
| +  void Initialize(Zone* zone, v8::base::VirtualMemory* virtual_memory) { | 
| +    DCHECK_EQ(reinterpret_cast<uintptr_t>(this) & Zone::kSegmentAlignmentMask, | 
| +              reinterpret_cast<uintptr_t>(this)); | 
| + | 
| +    next_ = nullptr; | 
| +    zone_ = zone; | 
| +    virtual_memory_.Reset(); | 
| +    virtual_memory_.TakeControl(virtual_memory); | 
| } | 
|  | 
| +  Zone* zone() const { return zone_; } | 
| Segment* next() const { return next_; } | 
| -  void clear_next() { next_ = nullptr; } | 
| +  void set_next(Segment* const value) { next_ = value; } | 
|  | 
| -  size_t size() const { return size_; } | 
| -  size_t capacity() const { return size_ - sizeof(Segment); } | 
| +  size_t size() const { | 
| +    return virtual_memory_.size() - | 
| +           (reinterpret_cast<uintptr_t>(this) - | 
| +            reinterpret_cast<uintptr_t>(virtual_memory_.address())); | 
| +  } | 
| + | 
| +  size_t capacity() const { return size() - sizeof(Segment); } | 
|  | 
| Address start() const { return address(sizeof(Segment)); } | 
| -  Address end() const { return address(size_); } | 
| +  Address end() const { return address(size()); } | 
| + | 
| +  bool is_big_object_segment() const { | 
| +    return capacity() > Zone::kMaximumSegmentSize; | 
| +  } | 
| + | 
| +  void Release() { | 
| +    v8::base::VirtualMemory vm; | 
| +    vm.TakeControl(&virtual_memory_); | 
| + | 
| +#ifdef DEBUG | 
| +    // Un-poison first so the zapping doesn't trigger ASan complaints. | 
| +    ASAN_UNPOISON_MEMORY_REGION(vm.address(), vm.size()); | 
| +    // Zap the entire current segment (including the header). | 
| +    memset(vm.address(), kZapDeadByte, vm.size()); | 
| +#endif | 
| + | 
| +    vm.Release(); | 
| +  } | 
| + | 
| +  void Reset() { | 
| +    // Un-poison so that neither the zapping nor the later reuse triggers | 
| +    // ASan complaints. | 
| +    ASAN_UNPOISON_MEMORY_REGION(virtual_memory_.address(), | 
| +                                virtual_memory_.size()); | 
| +#ifdef DEBUG | 
| +    // Zap the contents of the segment (but not the header). | 
| +    memset(reinterpret_cast<void*>(start()), kZapDeadByte, capacity()); | 
| +#endif | 
| +    next_ = nullptr; | 
| +  } | 
|  | 
| private: | 
| +#ifdef DEBUG | 
| +  // Constant byte value used for zapping dead memory in debug mode. | 
| +  static const unsigned char kZapDeadByte = 0xcd; | 
| +#endif | 
| + | 
| // Computes the address of the nth byte in this segment. | 
| Address address(size_t n) const { return Address(this) + n; } | 
|  | 
| +  Zone* zone_; | 
| Segment* next_; | 
| -  size_t size_; | 
| +  v8::base::VirtualMemory virtual_memory_; | 
| }; | 
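
With this change a `Segment` header sits at the base of an aligned `VirtualMemory` reservation, so `size()` is the reservation size minus the header's offset into it, and `capacity()` further subtracts the header itself. A standalone sketch of that layout arithmetic, using illustrative stand-in types rather than the real `Segment`/`VirtualMemory` classes:

```cpp
#include <cstddef>
#include <cstdint>

// Stand-in for the Segment header fields shown in the patch.
struct FakeHeader {
  void* zone;
  FakeHeader* next;
};

// Mirrors Segment::size(): bytes from the header to the reservation's end.
inline size_t SegmentSize(const FakeHeader* header, const void* base,
                          size_t reservation_size) {
  return reservation_size - (reinterpret_cast<uintptr_t>(header) -
                             reinterpret_cast<uintptr_t>(base));
}

// Mirrors Segment::capacity(): usable bytes once the header is paid for.
inline size_t SegmentCapacity(const FakeHeader* header, const void* base,
                              size_t reservation_size) {
  return SegmentSize(header, base, reservation_size) - sizeof(FakeHeader);
}
```

Since `Initialize` asserts that the header's address equals its own masked value, the header sits exactly at the aligned reservation base, and `size()` collapses to the reservation size in practice.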
|  | 
| Zone::Zone(base::AccountingAllocator* allocator) | 
| @@ -87,8 +134,37 @@ Zone::~Zone() { | 
| DCHECK(segment_bytes_allocated_ == 0); | 
| } | 
|  | 
| +Segment* Zone::GetZoneSegmentFromPointer(const void* ptr) { | 
| +  return reinterpret_cast<Segment*>(reinterpret_cast<uintptr_t>(ptr) & | 
| +                                    kSegmentAlignmentMask); | 
| +} | 
| + | 
| +Zone* Zone::GetZoneFromPointer(const void* ptr) { | 
| +  return GetZoneSegmentFromPointer(ptr)->zone(); | 
| +} | 
|  | 
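These helpers are the payoff of the aligned reservations: clearing the low bits of any pointer handed out by a zone recovers the segment base, whose header stores the owning `Zone*`. A minimal sketch, assuming a 2 MB `kSegmentAlignmentSize` (the real constant lives in zone.h and is not shown in this diff):

```cpp
#include <cstdint>

// Assumed alignment for illustration only; it must match the reservation
// alignment used when the segment was created.
constexpr uintptr_t kAssumedSegmentAlignment = uintptr_t{2} * 1024 * 1024;
constexpr uintptr_t kAssumedSegmentMask = ~(kAssumedSegmentAlignment - 1);

// Maps any interior pointer back to the aligned segment base, where the
// Segment header (and thus the owning Zone*) can be read.
inline void* SegmentBaseOf(const void* ptr) {
  return reinterpret_cast<void*>(reinterpret_cast<uintptr_t>(ptr) &
                                 kAssumedSegmentMask);
}
```

This O(1) lookup is what the new `DCHECK_EQ(GetZoneFromPointer(result), this)` assertions in `Zone::New` rely on.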
| void* Zone::New(size_t size) { | 
| +  Address result = position_; | 
| + | 
| +  // Corner case: zero-size allocations. | 
| +  if (size == 0) { | 
| +    // There has to be a normal segment for the returned pointer to | 
| +    // reference. | 
| +    if (segment_head_ == nullptr || segment_head_->is_big_object_segment()) { | 
| +      // Allocate a segment of minimal size. | 
| +      result = NewNormalSegment(kAlignment); | 
| +    } | 
| + | 
| +    DCHECK(!GetZoneSegmentFromPointer(result)->is_big_object_segment()); | 
| +    DCHECK_EQ(GetZoneFromPointer(result), this); | 
| +    return reinterpret_cast<void*>(result); | 
| +  } | 
| + | 
| +  // Large objects are a special case and get their own segment to live in. | 
| +  if (CalculateSegmentSize(size) > kMaximumSegmentSize) { | 
| +    result = NewLargeObjectSegment(size); | 
| +    DCHECK(GetZoneSegmentFromPointer(result)->is_big_object_segment()); | 
| +    return reinterpret_cast<void*>(result); | 
| +  } | 
| + | 
| // Round up the requested size to fit the alignment. | 
| size = RoundUp(size, kAlignment); | 
|  | 
| @@ -101,14 +177,13 @@ void* Zone::New(size_t size) { | 
| } | 
|  | 
| // Check if the requested size is available without expanding. | 
| -  Address result = position_; | 
|  | 
| const size_t size_with_redzone = size + kASanRedzoneBytes; | 
| const uintptr_t limit = reinterpret_cast<uintptr_t>(limit_); | 
| const uintptr_t position = reinterpret_cast<uintptr_t>(position_); | 
| // position_ > limit_ can be true after the alignment correction above. | 
| if (limit < position || size_with_redzone > limit - position) { | 
| -    result = NewExpand(size_with_redzone); | 
| +    result = NewNormalSegment(size_with_redzone); | 
| } else { | 
| position_ += size_with_redzone; | 
| } | 
| @@ -119,17 +194,14 @@ void* Zone::New(size_t size) { | 
|  | 
| // Check that the result has the proper alignment and return it. | 
| DCHECK(IsAddressAligned(result, kAlignment, 0)); | 
| +  DCHECK(!GetZoneSegmentFromPointer(result)->is_big_object_segment()); | 
| +  DCHECK_EQ(GetZoneFromPointer(result), this); | 
| allocation_size_ += size; | 
| return reinterpret_cast<void*>(result); | 
| } | 
|  | 
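`Zone::New` now has three paths: the zero-size corner case, a dedicated segment for large objects, and the unchanged bump-pointer fast path. A simplified model of that fast path, with hypothetical names (the real code manipulates `position_` and `limit_` directly):

```cpp
#include <cstddef>
#include <cstdint>

constexpr size_t kAssumedAlignment = 8;  // stands in for Zone::kAlignment

struct BumpRegion {
  uintptr_t position;  // next free byte
  uintptr_t limit;     // one past the last usable byte
};

// Returns nullptr when the current segment is exhausted; in the patch that
// is the point where NewNormalSegment allocates a fresh segment.
inline void* BumpAllocate(BumpRegion* r, size_t size) {
  size = (size + kAssumedAlignment - 1) & ~(kAssumedAlignment - 1);
  if (r->limit < r->position || size > r->limit - r->position) {
    return nullptr;
  }
  void* result = reinterpret_cast<void*>(r->position);
  r->position += size;
  return result;
}
```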
|  | 
| void Zone::DeleteAll() { | 
| -#ifdef DEBUG | 
| -  // Constant byte value used for zapping dead memory in debug mode. | 
| -  static const unsigned char kZapDeadByte = 0xcd; | 
| -#endif | 
| - | 
| // Find a segment with a suitable size to keep around. | 
| Segment* keep = nullptr; | 
| // Traverse the chained list of segments, zapping (in debug mode) | 
| @@ -139,16 +211,10 @@ void Zone::DeleteAll() { | 
| if (!keep && current->size() <= kMaximumKeptSegmentSize) { | 
| // Unlink the segment we wish to keep from the list. | 
| keep = current; | 
| -      keep->clear_next(); | 
| +      keep->Reset(); | 
| } else { | 
| -      size_t size = current->size(); | 
| -#ifdef DEBUG | 
| -      // Un-poison first so the zapping doesn't trigger ASan complaints. | 
| -      ASAN_UNPOISON_MEMORY_REGION(current, size); | 
| -      // Zap the entire current segment (including the header). | 
| -      memset(current, kZapDeadByte, size); | 
| -#endif | 
| -      DeleteSegment(current, size); | 
| +      segment_bytes_allocated_ -= current->size(); | 
| +      current->Release(); | 
| } | 
| current = next; | 
| } | 
| @@ -161,12 +227,6 @@ void Zone::DeleteAll() { | 
| Address start = keep->start(); | 
| position_ = RoundUp(start, kAlignment); | 
| limit_ = keep->end(); | 
| -    // Un-poison so we can re-use the segment later. | 
| -    ASAN_UNPOISON_MEMORY_REGION(start, keep->capacity()); | 
| -#ifdef DEBUG | 
| -    // Zap the contents of the kept segment (but not the header). | 
| -    memset(start, kZapDeadByte, keep->capacity()); | 
| -#endif | 
| } else { | 
| position_ = limit_ = 0; | 
| } | 
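The keep-one-segment optimization survives the rewrite: `DeleteAll` still recycles the first segment small enough to keep, so a zone that is emptied and refilled does not immediately go back to the OS. Its control flow, reduced to an illustrative skeleton with a stand-in constant:

```cpp
#include <cstddef>

struct Seg {
  Seg* next;
  size_t size;
};

constexpr size_t kAssumedMaxKeptSize = 64 * 1024;  // stand-in value

// Frees every segment except the first one small enough to keep around.
inline Seg* ReleaseAllButOne(Seg* head, void (*release)(Seg*)) {
  Seg* keep = nullptr;
  for (Seg* current = head; current != nullptr;) {
    Seg* next = current->next;
    if (keep == nullptr && current->size <= kAssumedMaxKeptSize) {
      keep = current;
      keep->next = nullptr;  // what Segment::Reset() does, minus the zapping
    } else {
      release(current);  // what Segment::Release() does
    }
    current = next;
  }
  return keep;
}
```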
| @@ -178,21 +238,10 @@ void Zone::DeleteAll() { | 
|  | 
|  | 
| void Zone::DeleteKeptSegment() { | 
| -#ifdef DEBUG | 
| -  // Constant byte value used for zapping dead memory in debug mode. | 
| -  static const unsigned char kZapDeadByte = 0xcd; | 
| -#endif | 
| - | 
| DCHECK(segment_head_ == nullptr || segment_head_->next() == nullptr); | 
| if (segment_head_ != nullptr) { | 
| -    size_t size = segment_head_->size(); | 
| -#ifdef DEBUG | 
| -    // Un-poison first so the zapping doesn't trigger ASan complaints. | 
| -    ASAN_UNPOISON_MEMORY_REGION(segment_head_, size); | 
| -    // Zap the entire kept segment (including the header). | 
| -    memset(segment_head_, kZapDeadByte, size); | 
| -#endif | 
| -    DeleteSegment(segment_head_, size); | 
| +    segment_bytes_allocated_ -= segment_head_->size(); | 
| +    segment_head_->Release(); | 
| segment_head_ = nullptr; | 
| } | 
|  | 
| @@ -200,27 +249,49 @@ void Zone::DeleteKeptSegment() { | 
| } | 
|  | 
|  | 
| -// Creates a new segment, sets it size, and pushes it to the front | 
| -// of the segment chain. Returns the new segment. | 
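| +// Reserves an aligned region, commits it, and constructs the Segment | 
| +// header at its base. Linking into the segment chain is the caller's job. | 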
| Segment* Zone::NewSegment(size_t size) { | 
| -  Segment* result = reinterpret_cast<Segment*>(allocator_->Allocate(size)); | 
| -  segment_bytes_allocated_ += size; | 
| -  if (result != nullptr) { | 
| -    result->Initialize(segment_head_, size); | 
| -    segment_head_ = result; | 
| +  v8::base::VirtualMemory vm(size, kSegmentAlignmentSize); | 
| + | 
| +  if (vm.IsReserved()) { | 
| +    DCHECK_EQ(reinterpret_cast<uintptr_t>(vm.address()) & kSegmentAlignmentMask, | 
| +              reinterpret_cast<uintptr_t>(vm.address())); | 
| +    DCHECK_EQ(vm.size(), size); | 
| +    if (!v8::base::VirtualMemory::CommitRegion(vm.address(), vm.size(), | 
| +                                               false)) { | 
| +      V8::FatalProcessOutOfMemory("Zone"); | 
| +    } | 
| + | 
| +    Segment* result = reinterpret_cast<Segment*>(vm.address()); | 
| + | 
| +    result->Initialize(this, &vm); | 
| + | 
| +    segment_bytes_allocated_ += result->size(); | 
| + | 
| +    return result; | 
| +  } else { | 
| +    V8::FatalProcessOutOfMemory("Zone"); | 
| +    return nullptr; | 
| } | 
| -  return result; | 
| } | 
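
Outside V8, the aligned-base addressing can be approximated with C++17 `std::aligned_alloc` (where available); this sketch captures only the addressing scheme, not the reserve/commit split or the accounting that `VirtualMemory` provides:

```cpp
#include <cassert>
#include <cstddef>
#include <cstdint>
#include <cstdlib>

constexpr size_t kAssumedSegmentAlignment = size_t{2} * 1024 * 1024;

// Returns a block whose base address has the alignment's low bits clear,
// so any pointer into the block can be masked back to the base.
inline void* AllocateAlignedSegment(size_t size) {
  // std::aligned_alloc requires the size to be a multiple of the alignment.
  size_t rounded = (size + kAssumedSegmentAlignment - 1) &
                   ~(kAssumedSegmentAlignment - 1);
  void* base = std::aligned_alloc(kAssumedSegmentAlignment, rounded);
  assert(base == nullptr ||
         reinterpret_cast<uintptr_t>(base) % kAssumedSegmentAlignment == 0);
  return base;
}
```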
|  | 
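| +// Allocates a dedicated segment for a single oversized object and links | 
| +// it in behind the list head so normal bump allocation is undisturbed. | 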
| +Address Zone::NewLargeObjectSegment(size_t size) { | 
| +  size_t new_size = CalculateSegmentSize(size); | 
| +  Segment* segment = NewSegment(new_size); | 
|  | 
| -// Deletes the given segment. Does not touch the segment chain. | 
| -void Zone::DeleteSegment(Segment* segment, size_t size) { | 
| -  segment_bytes_allocated_ -= size; | 
| -  allocator_->Free(segment, size); | 
| -} | 
| +  if (segment_head_ == nullptr) { | 
| +    // Corner case in which a large object segment becomes the head | 
| +    // of the segment list. | 
| +    segment_head_ = segment; | 
| +  } else { | 
| +    // Large object segments are inserted behind the head so that the | 
| +    // normal segment in segment_head_ keeps feeding bump allocation. | 
| +    segment->set_next(segment_head_->next()); | 
| +    segment_head_->set_next(segment); | 
| +  } | 
|  | 
| +  Address result = RoundUp(segment->start(), kAlignment); | 
| +  DCHECK_EQ(GetZoneFromPointer(segment), this); | 
| +  DCHECK_EQ(GetZoneFromPointer(result), this); | 
| +  return result; | 
| +} | 
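
Why insert second rather than at the front? `position_` and `limit_` always point into `segment_head_`, so splicing the large segment in behind the head keeps the bump pointers valid with no fixup. The invariant, reduced to its list manipulation (`Seg` is an illustrative stand-in):

```cpp
struct Seg {
  Seg* next = nullptr;
};

inline void InsertLargeSegment(Seg** head, Seg* large) {
  if (*head == nullptr) {
    *head = large;  // corner case: the list was empty
  } else {
    large->next = (*head)->next;  // splice in as the second element
    (*head)->next = large;
  }
}
```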
|  | 
| -Address Zone::NewExpand(size_t size) { | 
| +Address Zone::NewNormalSegment(size_t size) { | 
| // Make sure the requested size is already properly aligned and that | 
| // there isn't enough room in the Zone to satisfy the request. | 
| DCHECK_EQ(size, RoundDown(size, kAlignment)); | 
| @@ -229,39 +300,26 @@ Address Zone::NewExpand(size_t size) { | 
| reinterpret_cast<uintptr_t>(position_) < | 
| size); | 
|  | 
| -  // Compute the new segment size. We use a 'high water mark' | 
| -  // strategy, where we increase the segment size every time we expand | 
| -  // except that we employ a maximum segment size when we delete. This | 
| -  // is to avoid excessive malloc() and free() overhead. | 
| -  Segment* head = segment_head_; | 
| -  const size_t old_size = (head == nullptr) ? 0 : head->size(); | 
| -  static const size_t kSegmentOverhead = sizeof(Segment) + kAlignment; | 
| -  const size_t new_size_no_overhead = size + (old_size << 1); | 
| -  size_t new_size = kSegmentOverhead + new_size_no_overhead; | 
| -  const size_t min_new_size = kSegmentOverhead + size; | 
| -  // Guard against integer overflow. | 
| -  if (new_size_no_overhead < size || new_size < kSegmentOverhead) { | 
| -    V8::FatalProcessOutOfMemory("Zone"); | 
| -    return nullptr; | 
| -  } | 
| -  if (new_size < kMinimumSegmentSize) { | 
| -    new_size = kMinimumSegmentSize; | 
| -  } else if (new_size > kMaximumSegmentSize) { | 
| -    // Limit the size of new segments to avoid growing the segment size | 
| -    // exponentially, thus putting pressure on contiguous virtual address space. | 
| -    // All the while making sure to allocate a segment large enough to hold the | 
| -    // requested size. | 
| -    new_size = Max(min_new_size, kMaximumSegmentSize); | 
| -  } | 
| -  if (new_size > INT_MAX) { | 
| -    V8::FatalProcessOutOfMemory("Zone"); | 
| -    return nullptr; | 
| -  } | 
| +  // Only normal segments are allocated here. | 
| +  DCHECK_LE(size, kMaximumSegmentSize + 0); | 
| + | 
| +  size_t new_size = CalculateSegmentSize(size); | 
| +  const size_t old_size = | 
| +      (segment_head_ == nullptr) ? 0 : segment_head_->size(); | 
| +  new_size = Max(new_size, old_size << 1); | 
| +  new_size = Min(new_size, kMaximumSegmentSize); | 
| + | 
| +  // Rounding up must not push the size over the maximum. | 
| +  DCHECK_LE(new_size, kMaximumSegmentSize + 0); | 
| + | 
| Segment* segment = NewSegment(new_size); | 
| -  if (segment == nullptr) { | 
| -    V8::FatalProcessOutOfMemory("Zone"); | 
| -    return nullptr; | 
| -  } | 
| + | 
| +  // Put in front of the segment list | 
| +  segment->set_next(segment_head_); | 
| +  segment_head_ = segment; | 
| + | 
| +  // Normal segments must never exceed the segment alignment size. | 
| +  DCHECK_LE(segment->size(), kSegmentAlignmentSize + 0); | 
|  | 
| // Recompute 'top' and 'limit' based on the new segment. | 
| Address result = RoundUp(segment->start(), kAlignment); | 
| @@ -269,12 +327,23 @@ Address Zone::NewExpand(size_t size) { | 
| // Check for address overflow. | 
| // (Should not happen since the segment is guaranteed to accommodate | 
| // size bytes + header and alignment padding) | 
| -  DCHECK(reinterpret_cast<uintptr_t>(position_) >= | 
| -         reinterpret_cast<uintptr_t>(result)); | 
| +  DCHECK_GE(reinterpret_cast<uintptr_t>(position_), | 
| +            reinterpret_cast<uintptr_t>(result)); | 
| +  DCHECK_EQ(GetZoneFromPointer(segment), this); | 
| +  DCHECK_EQ(GetZoneFromPointer(result), this); | 
| +  DCHECK_EQ(GetZoneFromPointer(segment->end() - 1), this); | 
| limit_ = segment->end(); | 
| DCHECK(position_ <= limit_); | 
| return result; | 
| } | 
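
The growth policy is now three lines: take the size the request needs, double the previous head segment if that is larger, and cap the result at `kMaximumSegmentSize`. A worked sketch with an assumed 1 MB cap:

```cpp
#include <algorithm>
#include <cstddef>

constexpr size_t kAssumedMaximumSegmentSize = size_t{1} * 1024 * 1024;

inline size_t NextSegmentSize(size_t needed, size_t old_head_size) {
  size_t new_size = std::max(needed, old_head_size << 1);
  return std::min(new_size, kAssumedMaximumSegmentSize);
}

// E.g. needed = 8 KB after a 64 KB head doubles to 128 KB, while a 768 KB
// head is capped at 1 MB instead of growing to 1.5 MB.
```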
|  | 
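| +// Returns the total size needed to fit 'requested' bytes plus the Segment | 
| +// header and alignment slack, rounded up to a multiple of | 
| +// kMinimumSegmentSize. | 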
| +size_t Zone::CalculateSegmentSize(const size_t requested) { | 
| +  if (requested > INT_MAX) { | 
| +    V8::FatalProcessOutOfMemory("Zone"); | 
| +  } | 
| + | 
| +  return RoundUp(requested + sizeof(Segment) + kAlignment, kMinimumSegmentSize); | 
| +} | 
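
For concreteness, the arithmetic with assumed values (an 8 KB `kMinimumSegmentSize` and a 32-byte `Segment` header, neither of which appears in this diff):

```cpp
#include <cstddef>

constexpr size_t kAssumedMinimumSegmentSize = 8 * 1024;

inline size_t RoundUpTo(size_t value, size_t multiple) {
  return ((value + multiple - 1) / multiple) * multiple;
}

// 5000 requested + 32 header + 8 alignment slack = 5040 bytes, which rounds
// up to 8192: a single minimum-size segment covers the request.
inline size_t ExampleSegmentSize() {
  return RoundUpTo(5000 + 32 + 8, kAssumedMinimumSegmentSize);
}
```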
| + | 
| }  // namespace internal | 
| }  // namespace v8 | 
|  |