Chromium Code Reviews

| OLD | NEW |
|---|---|
| 1 // Copyright 2012 the V8 project authors. All rights reserved. | 1 // Copyright 2012 the V8 project authors. All rights reserved. |
| 2 // Use of this source code is governed by a BSD-style license that can be | 2 // Use of this source code is governed by a BSD-style license that can be |
| 3 // found in the LICENSE file. | 3 // found in the LICENSE file. |
| 4 | 4 |
| 5 #include "src/zone.h" | 5 #include "src/zone.h" |
| 6 | 6 |
| 7 #include <cstring> | 7 #include <cstring> |
| 8 | 8 |
| 9 #include "src/v8.h" | 9 #include "src/v8.h" |
| 10 | 10 |
| (...skipping 25 matching lines...) | |
| 36 } while (false) | 36 } while (false) |
| 37 | 37 |
| 38 const size_t kASanRedzoneBytes = 0; | 38 const size_t kASanRedzoneBytes = 0; |
| 39 | 39 |
| 40 #endif // V8_USE_ADDRESS_SANITIZER | 40 #endif // V8_USE_ADDRESS_SANITIZER |
| 41 | 41 |
| 42 } // namespace | 42 } // namespace |
| 43 | 43 |
| 44 | 44 |
| 45 // Segments represent chunks of memory: They have starting address | 45 // Segments represent chunks of memory: They have starting address |
| 46 // (encoded in the this pointer) and a size in bytes. Segments are | 46 // (encoded in the this pointer) and a VirtualMemory instance. Segments are |
| 47 // chained together forming a LIFO structure with the newest segment | 47 // chained together forming a LIFO structure with the newest segment |
| 48 // available as segment_head_. Segments are allocated using malloc() | 48 // available as segment_head_. Segments are allocated aligned via the |
| 49 // and de-allocated using free(). | 49 // VirtualMemory instance and released using it. |
| 50 | 50 |
| 51 class Segment { | 51 class Segment { |
| 52 public: | 52 public: |
| 53 void Initialize(Segment* next, size_t size) { | 53 void Initialize(Zone* zone, v8::base::VirtualMemory* virtual_memory) { |
| 54 next_ = next; | 54 DCHECK_EQ(reinterpret_cast<uintptr_t>(this) & Zone::kSegmentAlignmentMask, |
| 55 size_ = size; | 55 reinterpret_cast<uintptr_t>(this)); |
| 56 | |
| 57 next_ = nullptr; | |
| 58 zone_ = zone; | |
| 59 virtual_memory_.Reset(); | |
| 60 virtual_memory_.TakeControl(virtual_memory); | |
| 56 } | 61 } |
| 57 | 62 |
| 63 Zone* zone() const { return zone_; } | |
| 58 Segment* next() const { return next_; } | 64 Segment* next() const { return next_; } |
| 59 void clear_next() { next_ = nullptr; } | 65 void set_next(Segment* const value) { next_ = value; } |
| 60 | 66 |
| 61 size_t size() const { return size_; } | 67 size_t size() const { return virtual_memory_.size(); } |
| 62 size_t capacity() const { return size_ - sizeof(Segment); } | 68 |
| 69 size_t capacity() const { return size() - sizeof(Segment); } | |
| 63 | 70 |
| 64 Address start() const { return address(sizeof(Segment)); } | 71 Address start() const { return address(sizeof(Segment)); } |
| 65 Address end() const { return address(size_); } | 72 Address end() const { return address(size()); } |
| 73 | |
| 74 bool is_big_object_segment() const { | |
| 75 return size() > Zone::kMaximumSegmentSize; | |
| 76 } | |
| 77 | |
| 78 void Release() { | |
| 79 v8::base::VirtualMemory vm = v8::base::VirtualMemory(); | |
| 80 vm.TakeControl(&virtual_memory_); | |

Jakob Kummerow, 2016/09/02 14:11:49: Why is this dance necessary, as opposed to calling …

heimbuef, 2016/09/05 12:38:15: Because of the `memset` below. `virtual_memory_` w…

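The reply above is cut off, but the `memset` it refers to is the zap a few rows below: it overwrites the whole mapped region, including the `Segment` header where `virtual_memory_` itself lives, so the mapping handle has to be moved onto the stack before the header is clobbered. A minimal, self-contained sketch of that move-then-zap ordering, using an invented `Mapping` stand-in rather than V8's real `VirtualMemory`:

```cpp
// Illustrative stand-in for v8::base::VirtualMemory; not V8 code.
#include <cstdlib>
#include <cstring>

class Mapping {
 public:
  Mapping() : address_(nullptr), size_(0) {}
  void TakeControl(Mapping* other) {  // Move ownership; leave *other empty.
    address_ = other->address_;
    size_ = other->size_;
    other->address_ = nullptr;
    other->size_ = 0;
  }
  void Release() { std::free(address_); address_ = nullptr; size_ = 0; }
  std::size_t size() const { return size_; }

 private:
  void* address_;
  std::size_t size_;
};

struct SegmentHeader {  // Lives at the start of the region it owns.
  Mapping mapping;
};

void ReleaseSegment(SegmentHeader* segment) {
  Mapping local;
  local.TakeControl(&segment->mapping);      // 1. Move the handle to the stack.
  std::memset(segment, 0xcd, local.size());  // 2. Zapping the header is now safe.
  local.Release();                           // 3. Free via the surviving copy.
}
```
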
| 81 | |
| 82 #ifdef ENABLE_HANDLE_ZAPPING | |
| 83 // Un-poison first so the zapping doesn't trigger ASan complaints. | |
| 84 ASAN_UNPOISON_MEMORY_REGION(vm.address(), vm.size()); | |
| 85 // Zap the entire current segment (including the header). | |
| 86 memset(vm.address(), kZapDeadByte, vm.size()); | |
| 87 #endif | |
| 88 | |
| 89 vm.Release(); | |
| 90 } | |
| 91 | |
| 92 void Reset() { | |
| 93 // Un-poison so neither the zapping nor the reusing triggers ASan | |
| 94 // complaints. | |
| 95 ASAN_UNPOISON_MEMORY_REGION(virtual_memory_.address(), | |
| 96 virtual_memory_.size()); | |
| 97 #ifdef DEBUG | |
| 98 // Zap the entire current segment (including the header). | |
| 99 memset(reinterpret_cast<void*>(start()), kZapDeadByte, capacity()); | |
| 100 #endif | |
| 101 next_ = nullptr; | |
| 102 } | |
| 66 | 103 |
| 67 private: | 104 private: |
| 105 #ifdef DEBUG | |
| 106 // Constant byte value used for zapping dead memory in debug mode. | |
| 107 static const unsigned char kZapDeadByte = 0xcd; | |
| 108 #endif | |
| 109 | |
| 68 // Computes the address of the nth byte in this segment. | 110 // Computes the address of the nth byte in this segment. |
| 69 Address address(size_t n) const { return Address(this) + n; } | 111 Address address(size_t n) const { return Address(this) + n; } |
| 70 | 112 |
| 113 Zone* zone_; | |
| 71 Segment* next_; | 114 Segment* next_; |
| 72 size_t size_; | 115 v8::base::VirtualMemory virtual_memory_; |
| 116 | |
| 117 DISALLOW_COPY_AND_ASSIGN(Segment); | |
| 73 }; | 118 }; |
| 74 | 119 |
| 75 Zone::Zone(base::AccountingAllocator* allocator) | 120 Zone::Zone(base::AccountingAllocator* allocator) |
| 76 : allocation_size_(0), | 121 : allocation_size_(0), |
| 77 segment_bytes_allocated_(0), | 122 segment_bytes_allocated_(0), |
| 78 position_(0), | 123 position_(0), |
| 79 limit_(0), | 124 limit_(0), |
| 80 allocator_(allocator), | 125 allocator_(allocator), |
| 81 segment_head_(nullptr) {} | 126 segment_head_(nullptr) {} |
| 82 | 127 |
| 83 Zone::~Zone() { | 128 Zone::~Zone() { |
| 84 DeleteAll(); | 129 DeleteAll(); |
| 85 DeleteKeptSegment(); | 130 DeleteKeptSegment(); |
| 86 | 131 |
| 87 DCHECK(segment_bytes_allocated_ == 0); | 132 DCHECK(segment_bytes_allocated_ == 0); |
| 88 } | 133 } |
| 89 | 134 |
| 135 Segment* Zone::GetZoneSegmentFromPointer(const void* ptr) { | |
| 136 return reinterpret_cast<Segment*>(reinterpret_cast<uintptr_t>(ptr) & | |
| 137 kSegmentAlignmentMask); | |
| 138 } | |
| 139 | |
| 140 Zone* Zone::GetZoneFromPointer(const void* ptr) { | |
| 141 return GetZoneSegmentFromPointer(ptr)->zone(); | |
| 142 } | |
| 90 | 143 |
| 91 void* Zone::New(size_t size) { | 144 void* Zone::New(size_t size) { |
| 145 Address result = position_; | |
| 146 | |
| 147 // corner case: zero size | |

Jakob Kummerow, 2016/09/02 14:11:48: nit: Comments should have proper capitalization, g…

heimbuef, 2016/09/05 12:38:15: Acknowledged.

| 148 if (size == 0) { | |

Jakob Kummerow, 2016/09/02 14:11:48: Does this ever happen?

heimbuef, 2016/09/05 12:38:15: Yes, quite frequently. I also plan on using it mys…

| 149 // there has to be a normal segment to reference | |
| 150 if (segment_head_ == nullptr || segment_head_->is_big_object_segment()) { | |
| 151 // we allocate a segment of minimal size | |
| 152 result = NewNormalSegment(kAlignment); | |
| 153 } | |
| 154 | |
| 155 DCHECK(!GetZoneSegmentFromPointer(result)->is_big_object_segment()); | |
| 156 DCHECK_EQ(GetZoneFromPointer(result), this); | |
| 157 return reinterpret_cast<void*>(result); | |
| 158 } | |
| 159 | |
| 160 // Large objects are a special case and get their own segment to live in. | |
| 161 if (CalculateSegmentSize(size) > kMaximumSegmentSize) { | |
| 162 result = NewLargeObjectSegment(size); | |
| 163 DCHECK(GetZoneSegmentFromPointer(result)->is_big_object_segment()); | |
| 164 return reinterpret_cast<void*>(result); | |
| 165 } | |
| 166 | |
| 92 // Round up the requested size to fit the alignment. | 167 // Round up the requested size to fit the alignment. |
| 93 size = RoundUp(size, kAlignment); | 168 size = RoundUp(size, kAlignment); |
| 94 | 169 |
| 95 // If the allocation size is divisible by 8 then we return an 8-byte aligned | 170 // If the allocation size is divisible by 8 then we return an 8-byte aligned |
| 96 // address. | 171 // address. |
| 97 if (kPointerSize == 4 && kAlignment == 4) { | 172 if (kPointerSize == 4 && kAlignment == 4) { |
| 98 position_ += ((~size) & 4) & (reinterpret_cast<intptr_t>(position_) & 4); | 173 position_ += ((~size) & 4) & (reinterpret_cast<intptr_t>(position_) & 4); |
| 99 } else { | 174 } else { |
| 100 DCHECK(kAlignment >= kPointerSize); | 175 DCHECK(kAlignment >= kPointerSize); |
| 101 } | 176 } |
| 102 | 177 |
| 103 // Check if the requested size is available without expanding. | 178 // Check if the requested size is available without expanding. |
| 104 Address result = position_; | |
| 105 | 179 |
| 106 const size_t size_with_redzone = size + kASanRedzoneBytes; | 180 const size_t size_with_redzone = size + kASanRedzoneBytes; |
| 107 const uintptr_t limit = reinterpret_cast<uintptr_t>(limit_); | 181 const uintptr_t limit = reinterpret_cast<uintptr_t>(limit_); |
| 108 const uintptr_t position = reinterpret_cast<uintptr_t>(position_); | 182 const uintptr_t position = reinterpret_cast<uintptr_t>(position_); |
| 109 // position_ > limit_ can be true after the alignment correction above. | 183 // position_ > limit_ can be true after the alignment correction above. |
| 110 if (limit < position || size_with_redzone > limit - position) { | 184 if (limit < position || size_with_redzone > limit - position) { |
| 111 result = NewExpand(size_with_redzone); | 185 result = NewNormalSegment(size_with_redzone); |
| 112 } else { | 186 } else { |
| 113 position_ += size_with_redzone; | 187 position_ += size_with_redzone; |
| 114 } | 188 } |
| 115 | 189 |
| 116 Address redzone_position = result + size; | 190 Address redzone_position = result + size; |
| 117 DCHECK(redzone_position + kASanRedzoneBytes == position_); | 191 DCHECK(redzone_position + kASanRedzoneBytes == position_); |
| 118 ASAN_POISON_MEMORY_REGION(redzone_position, kASanRedzoneBytes); | 192 ASAN_POISON_MEMORY_REGION(redzone_position, kASanRedzoneBytes); |
| 119 | 193 |
| 120 // Check that the result has the proper alignment and return it. | 194 // Check that the result has the proper alignment and return it. |
| 121 DCHECK(IsAddressAligned(result, kAlignment, 0)); | 195 DCHECK(IsAddressAligned(result, kAlignment, 0)); |
| 196 DCHECK(!GetZoneSegmentFromPointer(result)->is_big_object_segment()); | |
| 197 DCHECK_EQ(GetZoneFromPointer(result), this); | |
| 122 allocation_size_ += size; | 198 allocation_size_ += size; |
| 123 return reinterpret_cast<void*>(result); | 199 return reinterpret_cast<void*>(result); |
| 124 } | 200 } |
| 125 | 201 |
| 126 | 202 |
| 127 void Zone::DeleteAll() { | 203 void Zone::DeleteAll() { |
| 128 #ifdef DEBUG | |
| 129 // Constant byte value used for zapping dead memory in debug mode. | |
| 130 static const unsigned char kZapDeadByte = 0xcd; | |
| 131 #endif | |
| 132 | |
| 133 // Find a segment with a suitable size to keep around. | 204 // Find a segment with a suitable size to keep around. |
| 134 Segment* keep = nullptr; | 205 Segment* keep = nullptr; |
| 135 // Traverse the chained list of segments, zapping (in debug mode) | 206 // Traverse the chained list of segments, zapping (in debug mode) |
| 136 // and freeing every segment except the one we wish to keep. | 207 // and freeing every segment except the one we wish to keep. |
| 137 for (Segment* current = segment_head_; current;) { | 208 for (Segment* current = segment_head_; current;) { |
| 138 Segment* next = current->next(); | 209 Segment* next = current->next(); |
| 139 if (!keep && current->size() <= kMaximumKeptSegmentSize) { | 210 if (!keep && current->size() <= kMaximumKeptSegmentSize) { |
| 140 // Unlink the segment we wish to keep from the list. | 211 // Unlink the segment we wish to keep from the list. |
| 141 keep = current; | 212 keep = current; |
| 142 keep->clear_next(); | 213 keep->Reset(); |
| 143 } else { | 214 } else { |
| 144 size_t size = current->size(); | 215 segment_bytes_allocated_ -= current->size(); |
| 145 #ifdef DEBUG | 216 allocator_->ChangeCurrentMemoryUsage( |
| 146 // Un-poison first so the zapping doesn't trigger ASan complaints. | 217 -static_cast<int64_t>(current->size())); |
| 147 ASAN_UNPOISON_MEMORY_REGION(current, size); | 218 current->Release(); |
| 148 // Zap the entire current segment (including the header). | |
| 149 memset(current, kZapDeadByte, size); | |
| 150 #endif | |
| 151 DeleteSegment(current, size); | |
| 152 } | 219 } |
| 153 current = next; | 220 current = next; |
| 154 } | 221 } |
| 155 | 222 |
| 156 // If we have found a segment we want to keep, we must recompute the | 223 // If we have found a segment we want to keep, we must recompute the |
| 157 // variables 'position' and 'limit' to prepare for future allocate | 224 // variables 'position' and 'limit' to prepare for future allocate |
| 158 // attempts. Otherwise, we must clear the position and limit to | 225 // attempts. Otherwise, we must clear the position and limit to |
| 159 // force a new segment to be allocated on demand. | 226 // force a new segment to be allocated on demand. |
| 160 if (keep) { | 227 if (keep) { |
| 161 Address start = keep->start(); | 228 Address start = keep->start(); |
| 162 position_ = RoundUp(start, kAlignment); | 229 position_ = RoundUp(start, kAlignment); |
| 163 limit_ = keep->end(); | 230 limit_ = keep->end(); |
| 164 // Un-poison so we can re-use the segment later. | |
| 165 ASAN_UNPOISON_MEMORY_REGION(start, keep->capacity()); | |
| 166 #ifdef DEBUG | |
| 167 // Zap the contents of the kept segment (but not the header). | |
| 168 memset(start, kZapDeadByte, keep->capacity()); | |
| 169 #endif | |
| 170 } else { | 231 } else { |
| 171 position_ = limit_ = 0; | 232 position_ = limit_ = 0; |
| 172 } | 233 } |
| 173 | 234 |
| 174 allocation_size_ = 0; | 235 allocation_size_ = 0; |
| 175 // Update the head segment to be the kept segment (if any). | 236 // Update the head segment to be the kept segment (if any). |
| 176 segment_head_ = keep; | 237 segment_head_ = keep; |
| 177 } | 238 } |
| 178 | 239 |
| 179 | 240 |
| 180 void Zone::DeleteKeptSegment() { | 241 void Zone::DeleteKeptSegment() { |
| 181 #ifdef DEBUG | |
| 182 // Constant byte value used for zapping dead memory in debug mode. | |
| 183 static const unsigned char kZapDeadByte = 0xcd; | |
| 184 #endif | |
| 185 | |
| 186 DCHECK(segment_head_ == nullptr || segment_head_->next() == nullptr); | 242 DCHECK(segment_head_ == nullptr || segment_head_->next() == nullptr); |
| 187 if (segment_head_ != nullptr) { | 243 if (segment_head_ != nullptr) { |
| 188 size_t size = segment_head_->size(); | 244 segment_bytes_allocated_ -= segment_head_->size(); |
| 189 #ifdef DEBUG | 245 allocator_->ChangeCurrentMemoryUsage( |
| 190 // Un-poison first so the zapping doesn't trigger ASan complaints. | 246 -static_cast<int64_t>(segment_head_->size())); |
| 191 ASAN_UNPOISON_MEMORY_REGION(segment_head_, size); | 247 segment_head_->Release(); |
| 192 // Zap the entire kept segment (including the header). | |
| 193 memset(segment_head_, kZapDeadByte, size); | |
| 194 #endif | |
| 195 DeleteSegment(segment_head_, size); | |
| 196 segment_head_ = nullptr; | 248 segment_head_ = nullptr; |
| 197 } | 249 } |
| 198 | 250 |
| 199 DCHECK(segment_bytes_allocated_ == 0); | 251 DCHECK(segment_bytes_allocated_ == 0); |
| 200 } | 252 } |
| 201 | 253 |
| 202 | 254 |
| 203 // Creates a new segment, sets it size, and pushes it to the front | |
| 204 // of the segment chain. Returns the new segment. | |
| 205 Segment* Zone::NewSegment(size_t size) { | 255 Segment* Zone::NewSegment(size_t size) { |
| 206 Segment* result = reinterpret_cast<Segment*>(allocator_->Allocate(size)); | 256 v8::base::VirtualMemory vm(size, kSegmentAlignmentSize); |
| 207 segment_bytes_allocated_ += size; | 257 |
| 208 if (result != nullptr) { | 258 if (vm.IsReserved()) { |

Jakob Kummerow, 2016/09/02 14:11:49: style nit: I'd reverse the condition to both make …

heimbuef, 2016/09/05 12:38:15: Acknowledged.

| 209 result->Initialize(segment_head_, size); | 259 DCHECK_EQ(reinterpret_cast<uintptr_t>(vm.address()) & kSegmentAlignmentMask, |
| 210 segment_head_ = result; | 260 reinterpret_cast<uintptr_t>(vm.address())); |
| 261 DCHECK_EQ(vm.size(), size); | |
| 262 v8::base::VirtualMemory::CommitRegion(vm.address(), vm.size(), false); | |

Jakob Kummerow, 2016/09/02 14:11:48: This can fail! You must call FatalProcessOutOfMemo…

heimbuef, 2016/09/05 12:38:15: Acknowledged.

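The acknowledged fix is not part of this snapshot; a hedged sketch of what it would presumably look like, assuming `CommitRegion` reports failure through its return value as the reviewer's comment implies:

```cpp
// Sketch of the requested change, not the committed code: abort on a failed
// commit instead of silently ignoring the result.
if (!v8::base::VirtualMemory::CommitRegion(vm.address(), vm.size(), false)) {
  V8::FatalProcessOutOfMemory("Zone");
}
```
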
| 263 | |
| 264 Segment* result = reinterpret_cast<Segment*>(vm.address()); | |
| 265 | |
| 266 result->Initialize(this, &vm); | |
| 267 | |
| 268 segment_bytes_allocated_ += result->size(); | |
| 269 allocator_->ChangeCurrentMemoryUsage(result->size()); | |
| 270 | |
| 271 return result; | |
| 272 } else { | |
| 273 V8::FatalProcessOutOfMemory("Zone"); | |
| 274 return nullptr; | |
| 211 } | 275 } |
| 276 } | |
| 277 | |
| 278 Address Zone::NewLargeObjectSegment(size_t size) { | |
| 279 size_t new_size = CalculateSegmentSize(size); | |
| 280 Segment* segment = NewSegment(new_size); | |
| 281 | |
| 282 if (segment_head_ == nullptr) { | |
| 283 // corner case in which a large object segment becomes the head | |

Jakob Kummerow, 2016/09/02 14:11:48: nit: capitalization please

heimbuef, 2016/09/05 12:38:15: Acknowledged.

| 284 // of the segment list. | |
| 285 segment_head_ = segment; | |
| 286 } else { | |
| 287 // large object segments should be inserted second into the list | |

Jakob Kummerow, 2016/09/02 14:11:48: again, and punctuation

heimbuef, 2016/09/05 12:38:15: Acknowledged.

| 288 segment->set_next(segment_head_->next()); | |
| 289 segment_head_->set_next(segment); | |
| 290 } | |
| 291 | |
| 292 Address result = RoundUp(segment->start(), kAlignment); | |
| 293 DCHECK_EQ(GetZoneFromPointer(segment), this); | |
| 294 DCHECK_EQ(GetZoneFromPointer(result), this); | |
| 212 return result; | 295 return result; |
| 213 } | 296 } |
| 214 | 297 |
| 215 | 298 Address Zone::NewNormalSegment(size_t size) { |
| 216 // Deletes the given segment. Does not touch the segment chain. | |
| 217 void Zone::DeleteSegment(Segment* segment, size_t size) { | |
| 218 segment_bytes_allocated_ -= size; | |
| 219 allocator_->Free(segment, size); | |
| 220 } | |
| 221 | |
| 222 | |
| 223 Address Zone::NewExpand(size_t size) { | |
| 224 // Make sure the requested size is already properly aligned and that | 299 // Make sure the requested size is already properly aligned and that |
| 225 // there isn't enough room in the Zone to satisfy the request. | 300 // there isn't enough room in the Zone to satisfy the request. |
| 226 DCHECK_EQ(size, RoundDown(size, kAlignment)); | 301 DCHECK_EQ(size, RoundDown(size, kAlignment)); |
| 227 DCHECK(limit_ < position_ || | 302 DCHECK(limit_ < position_ || |
| 228 reinterpret_cast<uintptr_t>(limit_) - | 303 reinterpret_cast<uintptr_t>(limit_) - |
| 229 reinterpret_cast<uintptr_t>(position_) < | 304 reinterpret_cast<uintptr_t>(position_) < |
| 230 size); | 305 size); |
| 231 | 306 |
| 232 // Compute the new segment size. We use a 'high water mark' | 307 // Only normal segments here |

Jakob Kummerow, 2016/09/02 14:11:49: nit: punctuation, again several times below

heimbuef, 2016/09/05 12:38:15: Acknowledged.

| 233 // strategy, where we increase the segment size every time we expand | 308 DCHECK_LE(size, kMaximumSegmentSize + 0); |

Jakob Kummerow, 2016/09/02 14:11:48: why "+ 0"?

heimbuef, 2016/09/05 12:38:15: Because the Macro does not compile otherwise. I ca…

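The reply here is also truncated; one plausible reading (my assumption, not the author's words) is the usual `static const` data-member pitfall: CHECK-style macros bind their operands to references, which odr-uses the member and then needs an out-of-line definition to link, whereas `+ 0` produces a temporary value and avoids that. A tiny illustration with invented names:

```cpp
// Hypothetical illustration of the "+ 0" workaround; names are invented.
#include <cstddef>

struct Limits {
  static const std::size_t kMax = 1 << 18;  // Declared in-class, never defined.
};

template <typename T>
bool LessOrEqual(const T& a, const T& b) {  // Binds both operands to references.
  return a <= b;
}

int main() {
  // LessOrEqual(std::size_t{8}, Limits::kMax);  // odr-uses kMax; may fail to link.
  return LessOrEqual(std::size_t{8}, Limits::kMax + 0) ? 0 : 1;  // Rvalue: fine.
}
```
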
| 234 // except that we employ a maximum segment size when we delete. This | 309 |
| 235 // is to avoid excessive malloc() and free() overhead. | 310 size_t new_size = CalculateSegmentSize(size); |
| 236 Segment* head = segment_head_; | 311 const size_t old_size = |
| 237 const size_t old_size = (head == nullptr) ? 0 : head->size(); | 312 (segment_head_ == nullptr) ? 0 : segment_head_->size(); |
| 238 static const size_t kSegmentOverhead = sizeof(Segment) + kAlignment; | 313 new_size = Max(new_size, old_size << 1); |
| 239 const size_t new_size_no_overhead = size + (old_size << 1); | 314 new_size = Min(new_size, kMaximumSegmentSize); |
| 240 size_t new_size = kSegmentOverhead + new_size_no_overhead; | 315 |
| 241 const size_t min_new_size = kSegmentOverhead + size; | 316 // Rounding up shall not mess with our limits |
| 242 // Guard against integer overflow. | 317 DCHECK_LE(new_size, kMaximumSegmentSize + 0); |
| 243 if (new_size_no_overhead < size || new_size < kSegmentOverhead) { | 318 |
| 244 V8::FatalProcessOutOfMemory("Zone"); | |
| 245 return nullptr; | |
| 246 } | |
| 247 if (new_size < kMinimumSegmentSize) { | |
| 248 new_size = kMinimumSegmentSize; | |
| 249 } else if (new_size > kMaximumSegmentSize) { | |
| 250 // Limit the size of new segments to avoid growing the segment size | |
| 251 // exponentially, thus putting pressure on contiguous virtual address space. | |
| 252 // All the while making sure to allocate a segment large enough to hold the | |
| 253 // requested size. | |
| 254 new_size = Max(min_new_size, kMaximumSegmentSize); | |
| 255 } | |
| 256 if (new_size > INT_MAX) { | |
| 257 V8::FatalProcessOutOfMemory("Zone"); | |
| 258 return nullptr; | |
| 259 } | |
| 260 Segment* segment = NewSegment(new_size); | 319 Segment* segment = NewSegment(new_size); |
| 261 if (segment == nullptr) { | 320 |
| 262 V8::FatalProcessOutOfMemory("Zone"); | 321 // Put in front of the segment list |
| 263 return nullptr; | 322 segment->set_next(segment_head_); |
| 264 } | 323 segment_head_ = segment; |
| 324 | |
| 325 // Normal segments must not be bigger than the alignment size | |
| 326 DCHECK_LE(segment->size(), kSegmentAlignmentSize + 0); | |
| 265 | 327 |
| 266 // Recompute 'top' and 'limit' based on the new segment. | 328 // Recompute 'top' and 'limit' based on the new segment. |
| 267 Address result = RoundUp(segment->start(), kAlignment); | 329 Address result = RoundUp(segment->start(), kAlignment); |
| 268 position_ = result + size; | 330 position_ = result + size; |
| 269 // Check for address overflow. | 331 // Check for address overflow. |
| 270 // (Should not happen since the segment is guaranteed to accommodate | 332 // (Should not happen since the segment is guaranteed to accommodate |
| 271 // size bytes + header and alignment padding) | 333 // size bytes + header and alignment padding) |
| 272 DCHECK(reinterpret_cast<uintptr_t>(position_) >= | 334 DCHECK_GE(reinterpret_cast<uintptr_t>(position_), |
| 273 reinterpret_cast<uintptr_t>(result)); | 335 reinterpret_cast<uintptr_t>(result)); |
| 336 DCHECK_EQ(GetZoneFromPointer(segment), this); | |
| 337 DCHECK_EQ(GetZoneFromPointer(result), this); | |
| 338 DCHECK_EQ(GetZoneFromPointer(segment->end() - 1), this); | |
| 274 limit_ = segment->end(); | 339 limit_ = segment->end(); |
| 275 DCHECK(position_ <= limit_); | 340 DCHECK(position_ <= limit_); |
| 276 return result; | 341 return result; |
| 277 } | 342 } |
| 278 | 343 |
| 344 size_t Zone::CalculateSegmentSize(const size_t requested) { | |
| 345 if (UINTPTR_MAX - (sizeof(Segment) + kAlignment) < requested) { | |
| 346 V8::FatalProcessOutOfMemory("Zone"); | |
| 347 } | |
| 348 | |
| 349 return RoundUp(requested + sizeof(Segment) + kAlignment, kMinimumSegmentSize); | |
| 350 } | |
| 351 | |
| 279 } // namespace internal | 352 } // namespace internal |
| 280 } // namespace v8 | 353 } // namespace v8 |
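
A closing note on the central trick in this patch: every segment is reserved at a `kSegmentAlignmentSize`-aligned address, so `GetZoneSegmentFromPointer` can recover the owning `Segment` (and, via its `zone_` field, the `Zone`) from any interior pointer with a single mask. A standalone sketch of that arithmetic, with invented constants since the real values are declared in zone.h and are not part of this diff:

```cpp
#include <cstdint>
#include <cstdio>

// Invented values for illustration; the real constants are declared in zone.h.
constexpr uintptr_t kSegmentAlignmentSize = uintptr_t{1} << 18;  // e.g. 256 KB
constexpr uintptr_t kSegmentAlignmentMask = ~(kSegmentAlignmentSize - 1);

// Masking a pointer that lies inside an aligned segment yields the segment's
// base address, which is where the Segment header (and its zone_) is stored.
uintptr_t SegmentBase(const void* ptr) {
  return reinterpret_cast<uintptr_t>(ptr) & kSegmentAlignmentMask;
}

int main() {
  const uintptr_t base = 7 * kSegmentAlignmentSize;  // Pretend vm.address().
  const uintptr_t object = base + 0x1a30;            // An allocation inside it.
  std::printf("%s\n",
              SegmentBase(reinterpret_cast<const void*>(object)) == base
                  ? "same segment"
                  : "bug");
  return 0;
}
```

The mask only round-trips for pointers within the first `kSegmentAlignmentSize` bytes of a segment, which appears to be why oversized allocations are segregated into their own big-object segments and why the DCHECKs in `Zone::New` assert that returned addresses do not come from one.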