OLD | NEW |
1 // Copyright 2012 the V8 project authors. All rights reserved. | 1 // Copyright 2012 the V8 project authors. All rights reserved. |
2 // Use of this source code is governed by a BSD-style license that can be | 2 // Use of this source code is governed by a BSD-style license that can be |
3 // found in the LICENSE file. | 3 // found in the LICENSE file. |
4 | 4 |
5 #include "src/zone.h" | 5 #include "src/zone.h" |
6 | 6 |
7 #include <cstring> | 7 #include <cstring> |
| 8 #include "include/v8-platform.h" |
| 9 #include "src/base/platform/time.h" |
8 | 10 |
9 #include "src/v8.h" | 11 #include "src/v8.h" |
10 | 12 |
11 #ifdef V8_USE_ADDRESS_SANITIZER | 13 #ifdef V8_USE_ADDRESS_SANITIZER |
12 #include <sanitizer/asan_interface.h> | 14 #include <sanitizer/asan_interface.h> |
13 #endif // V8_USE_ADDRESS_SANITIZER | 15 #endif // V8_USE_ADDRESS_SANITIZER |
14 | 16 |
15 namespace v8 { | 17 namespace v8 { |
16 namespace internal { | 18 namespace internal { |
17 | 19 |
(...skipping 16 matching lines...)
34 USE(start); \ | 36 USE(start); \ |
35 USE(size); \ | 37 USE(size); \ |
36 } while (false) | 38 } while (false) |
37 | 39 |
38 const size_t kASanRedzoneBytes = 0; | 40 const size_t kASanRedzoneBytes = 0; |
39 | 41 |
40 #endif // V8_USE_ADDRESS_SANITIZER | 42 #endif // V8_USE_ADDRESS_SANITIZER |
41 | 43 |
42 } // namespace | 44 } // namespace |
43 | 45 |
| 46 clock_t begin = clock(); |
44 | 47 |
45 // Segments represent chunks of memory: They have starting address | 48 // Segments represent chunks of memory: They have starting address |
46 // (encoded in the this pointer) and a size in bytes. Segments are | 49 // (encoded in the this pointer) and a VirtualMemory instance. Segments are |
47 // chained together forming a LIFO structure with the newest segment | 50 // chained together forming a LIFO structure with the newest segment |
48 // available as segment_head_. Segments are allocated using malloc() | 51 // available as segment_head_. Segments are allocated at aligned |
49 // and de-allocated using free(). | 52 // addresses via their VirtualMemory instance and released through it. |
50 | 53 |
51 class Segment { | 54 class Segment { |
52 public: | 55 public: |
53 void Initialize(Segment* next, size_t size) { | 56 void Initialize(Zone* zone, v8::base::VirtualMemory* virtual_memory, |
54 next_ = next; | 57 size_t size) { |
| 58 DCHECK_EQ(reinterpret_cast<uintptr_t>(this) & Zone::kSegmentAlignmentMask, |
| 59 reinterpret_cast<uintptr_t>(this)); |
| 60 |
| 61 next_ = nullptr; |
| 62 zone_ = zone; |
| 63 virtual_memory_.Reset(); |
| 64 virtual_memory_.TakeControl(virtual_memory); |
55 size_ = size; | 65 size_ = size; |
56 } | 66 } |
57 | 67 |
| 68 void set_zone(Zone* zone) { zone_ = zone; } |
| 69 |
| 70 Zone* zone() const { return zone_; } |
58 Segment* next() const { return next_; } | 71 Segment* next() const { return next_; } |
59 void clear_next() { next_ = nullptr; } | 72 void set_next(Segment* const value) { next_ = value; } |
60 | 73 |
61 size_t size() const { return size_; } | 74 size_t size() const { return size_; } |
62 size_t capacity() const { return size_ - sizeof(Segment); } | 75 |
| 76 size_t capacity() const { return size() - sizeof(Segment); } |
63 | 77 |
64 Address start() const { return address(sizeof(Segment)); } | 78 Address start() const { return address(sizeof(Segment)); } |
65 Address end() const { return address(size_); } | 79 Address end() const { return address(size()); } |
| 80 |
| 81 bool is_big_object_segment() const { |
| 82 return size() > Zone::kMaximumSegmentSize; |
| 83 } |
| 84 |
| 85 void Release() { |
| 86 // PrintF("%f; -%lu;0\n", static_cast<double>(clock() - begin) / CLOCKS_PER_SEC, |
| 87 // size_); |
| 88 #ifdef ENABLE_HANDLE_ZAPPING |
| 89 // We are going to zap the memory the segment is stored in, so we |
| 90 // need to save the virtual memory information to be able to release |
| 91 // it. |
| 92 v8::base::VirtualMemory vm = v8::base::VirtualMemory(); |
| 93 vm.TakeControl(&virtual_memory_); |
| 94 // Un-poison first so the zapping doesn't trigger ASan complaints. |
| 95 ASAN_UNPOISON_MEMORY_REGION(this, size_); |
| 96 // Zap the entire current segment (including the header). |
| 97 memset(this, kZapDeadByte, size_); |
| 98 |
| 99 vm.Release(); |
| 100 #else |
| 101 virtual_memory_.Release(); |
| 102 #endif |
| 103 } |
| 104 |
| 105 void Reset() { |
| 106 // Un-poison so neither the zapping nor the reusing triggers ASan |
| 107 // complaints. |
| 108 ASAN_UNPOISON_MEMORY_REGION(virtual_memory_.address(), |
| 109 virtual_memory_.size()); |
| 110 #ifdef ENABLE_HANDLE_ZAPPING |
| 111 // Zap the entire current segment (excluding the header). |
| 112 memset(reinterpret_cast<void*>(start()), kZapDeadByte, capacity()); |
| 113 #endif |
| 114 next_ = nullptr; |
| 115 } |
66 | 116 |
67 private: | 117 private: |
| 118 #ifdef ENABLE_HANDLE_ZAPPING |
| 119 // Constant byte value used for zapping dead memory in debug mode. |
| 120 static const unsigned char kZapDeadByte = 0xcd; |
| 121 #endif |
| 122 |
68 // Computes the address of the nth byte in this segment. | 123 // Computes the address of the nth byte in this segment. |
69 Address address(size_t n) const { return Address(this) + n; } | 124 Address address(size_t n) const { return Address(this) + n; } |
70 | 125 |
| 126 Zone* zone_; |
71 Segment* next_; | 127 Segment* next_; |
| 128 v8::base::VirtualMemory virtual_memory_; |
| 129 |
72 size_t size_; | 130 size_t size_; |
| 131 |
| 132 DISALLOW_COPY_AND_ASSIGN(Segment); |
73 }; | 133 }; |
74 | 134 |
| 135 namespace SegmentPool { |
| 136 namespace { |
| 137 static const uint8_t kMinSegmentSizePower = 13; |
| 138 static const uint8_t kMaxSegmentSizePower = 17; |
| 139 |
| 140 static const uint8_t kMaxSegmentsPerBucket = 15; |
| 141 |
| 142 STATIC_ASSERT(kMinSegmentSizePower <= kMaxSegmentSizePower); |
| 143 |
| 144 static Segment* garbage_segment_stack_head_ = nullptr; |
| 145 |
| 146 static size_t garbage_segment_stack_size_ = 0; |
| 147 |
| 148 static v8::base::Mutex* garbage_segments_mutex_ = new base::Mutex(); |
| 149 |
| 150 static Segment** unused_segments_heads_ = |
| 151 new Segment*[1 + kMaxSegmentSizePower - kMinSegmentSizePower]; |
| 152 |
| 153 static size_t* unused_segments_sizes = |
| 154 new size_t[1 + kMaxSegmentSizePower - kMinSegmentSizePower]; |
| 155 |
| 156 static size_t unused_segments_size_ = 0; |
| 157 |
| 158 static v8::base::Mutex* unused_segments_mutex_ = new base::Mutex(); |
| 159 |
| 160 static v8::base::Semaphore* cleanup_semaphore = new base::Semaphore(1); |
| 161 |
| 162 static Segment* PopSegmentFromGarbageStack() { |
| 163 garbage_segments_mutex_->Lock(); |
| 164 auto result = garbage_segment_stack_head_; |
| 165 |
| 166 if (result) { |
| 167 garbage_segment_stack_head_ = result->next(); |
| 168 garbage_segment_stack_size_ -= result->size(); |
| 169 } |
| 170 |
| 171 garbage_segments_mutex_->Unlock(); |
| 172 |
| 173 return result; |
| 174 } |
| 175 |
| 176 class SegmentReleaser : public Task { |
| 177 public: |
| 178 void Run() override { |
| 179 ReleaseGarbage(); |
| 180 cleanup_semaphore->Signal(); |
| 181 } |
| 182 |
| 183 private: |
| 184 static void ReleaseGarbage() { |
| 185 while (true) { |
| 186 Segment* segment = PopSegmentFromGarbageStack(); |
| 187 |
| 188 if (segment == nullptr) break; |
| 189 |
| 190 segment->Release(); |
| 191 } |
| 192 } |
| 193 }; |
| 194 |
| 195 static void SignalGC() { |
| 196 if (cleanup_semaphore->WaitFor(base::TimeDelta::FromSeconds(0))) { |
| 197 V8::GetCurrentPlatform()->CallOnBackgroundThread( |
| 198 new SegmentReleaser(), Platform::kShortRunningTask); |
| 199 } |
| 200 } |
| 201 } // namespace |
| 202 |
| 203 static void PushSegmentToGarbageStack(Segment* segment) { |
| 204 garbage_segments_mutex_->Lock(); |
| 205 segment->set_next(garbage_segment_stack_head_); |
| 206 garbage_segment_stack_head_ = segment; |
| 207 garbage_segment_stack_size_ += segment->size(); |
| 208 |
| 209 if (garbage_segment_stack_size_ > 1 << 20) { |
| 210 SignalGC(); |
| 211 } |
| 212 |
| 213 garbage_segments_mutex_->Unlock(); |
| 214 } |
| 215 |
| 216 static Segment* GetSegmentFromPool(size_t requested_size) { |
| 217 if (requested_size > 1 << kMaxSegmentSizePower) { |
| 218 return nullptr; |
| 219 } |
| 220 |
| 221 uint8_t power = kMinSegmentSizePower; |
| 222 |
| 223 while (requested_size > 1 << power) power++; |
| 224 |
| 225 power -= kMinSegmentSizePower; |
| 226 |
| 227 DCHECK_GE(power, 0); |
| 228 |
| 229 unused_segments_mutex_->Lock(); |
| 230 |
| 231 Segment* segment = unused_segments_heads_[power]; |
| 232 |
| 233 if (segment) { |
| 234 unused_segments_heads_[power] = segment->next(); |
| 235 segment->set_next(nullptr); |
| 236 |
| 237 unused_segments_sizes[power]--; |
| 238 unused_segments_size_ -= segment->size(); |
| 239 } |
| 240 |
| 241 unused_segments_mutex_->Unlock(); |
| 242 |
| 243 if (segment) { |
| 244 DCHECK_GE(segment->size(), requested_size); |
| 245 // PrintF("%f; 0;-%lu\n", static_cast<double>(clock() - begin) / |
| 246 // CLOCKS_PER_SEC, segment->size()); |
| 247 } |
| 248 return segment; |
| 249 } |
| 250 |
| 251 static bool AddSegmentToPool(Segment* segment) { |
| 252 size_t size = segment->size(); |
| 253 |
| 254 if (size >= (1 << (kMaxSegmentSizePower + 1))) { |
| 255 return false; |
| 256 } |
| 257 |
| 258 if (size < (1 << kMinSegmentSizePower)) { |
| 259 return false; |
| 260 } |
| 261 |
| 262 uint8_t power = kMaxSegmentSizePower; |
| 263 |
| 264 while (size < 1 << power) power--; |
| 265 |
| 266 power -= kMinSegmentSizePower; |
| 267 |
| 268 DCHECK_GE(power, 0); |
| 269 |
| 270 unused_segments_mutex_->Lock(); |
| 271 |
| 272 if (unused_segments_sizes[power] >= kMaxSegmentsPerBucket) { |
| 273 unused_segments_mutex_->Unlock(); |
| 274 return false; |
| 275 } |
| 276 |
| 277 segment->set_next(unused_segments_heads_[power]); |
| 278 unused_segments_heads_[power] = segment; |
| 279 unused_segments_size_ += size; |
| 280 unused_segments_sizes[power]++; |
| 281 |
| 282 unused_segments_mutex_->Unlock(); |
| 283 |
| 284 // PrintF("%f; 0;+%lu\n", static_cast<double>(clock() - begin) / |
| 285 // CLOCKS_PER_SEC, size); |
| 286 |
| 287 return true; |
| 288 } |
| 289 } // namespace SegmentPool |
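
The pool above keeps per-size-class free lists for segments between 2^13 and 2^17 bytes. A small standalone sketch of the size-class computation used by GetSegmentFromPool (the two power constants are copied from the namespace above; BucketForRequest is a hypothetical helper name used only for this illustration):

    #include <cassert>
    #include <cstddef>
    #include <cstdint>

    static const uint8_t kMinSegmentSizePower = 13;  // 8 KB
    static const uint8_t kMaxSegmentSizePower = 17;  // 128 KB

    // Smallest size class whose segments can satisfy `requested_size`,
    // mirroring the loop at the top of GetSegmentFromPool; -1 means the
    // request is too large for the pool.
    static int BucketForRequest(size_t requested_size) {
      if (requested_size > (size_t{1} << kMaxSegmentSizePower)) return -1;
      uint8_t power = kMinSegmentSizePower;
      while (requested_size > (size_t{1} << power)) power++;
      return power - kMinSegmentSizePower;
    }

    int main() {
      assert(BucketForRequest(1) == 0);                // anything up to 8 KB
      assert(BucketForRequest(8 * 1024) == 0);         // exactly 8 KB
      assert(BucketForRequest(8 * 1024 + 1) == 1);     // spills into the 16 KB class
      assert(BucketForRequest(128 * 1024) == 4);       // largest pooled class
      assert(BucketForRequest(128 * 1024 + 1) == -1);  // bypasses the pool
      return 0;
    }
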
| 290 |
75 Zone::Zone(base::AccountingAllocator* allocator) | 291 Zone::Zone(base::AccountingAllocator* allocator) |
76 : allocation_size_(0), | 292 : allocation_size_(0), |
77 segment_bytes_allocated_(0), | 293 segment_bytes_allocated_(0), |
78 position_(0), | 294 position_(0), |
79 limit_(0), | 295 limit_(0), |
80 allocator_(allocator), | 296 allocator_(allocator), |
81 segment_head_(nullptr) {} | 297 segment_head_(nullptr) {} |
82 | 298 |
83 Zone::~Zone() { | 299 Zone::~Zone() { |
84 DeleteAll(); | 300 DeleteAll(); |
85 DeleteKeptSegment(); | 301 DeleteKeptSegment(); |
86 | 302 |
87 DCHECK(segment_bytes_allocated_ == 0); | 303 DCHECK(segment_bytes_allocated_ == 0); |
88 } | 304 } |
89 | 305 |
| 306 Segment* Zone::GetZoneSegmentFromPointer(const void* ptr) { |
| 307 return reinterpret_cast<Segment*>(reinterpret_cast<uintptr_t>(ptr) & |
| 308 kSegmentAlignmentMask); |
| 309 } |
| 310 |
| 311 Zone* Zone::GetZoneFromPointer(const void* ptr) { |
| 312 return GetZoneSegmentFromPointer(ptr)->zone(); |
| 313 } |
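
GetZoneSegmentFromPointer relies on every normal segment header sitting at an address that is a multiple of the segment alignment size, so masking any pointer handed out from that segment recovers the header (and, through it, the owning Zone). A standalone illustration with stand-in constants (kSegmentAlignmentSize and kSegmentAlignmentMask are declared in src/zone.h, which is not part of this diff):

    #include <cassert>
    #include <cstdint>

    // Stand-in values for illustration only; the real constants live in zone.h.
    static const uintptr_t kSegmentAlignmentSize = uintptr_t{1} << 13;
    static const uintptr_t kSegmentAlignmentMask = ~(kSegmentAlignmentSize - 1);

    int main() {
      // Pretend a Segment header starts at an aligned address and an object
      // was handed out 200 bytes into that segment.
      uintptr_t segment_base = 32 * kSegmentAlignmentSize;
      uintptr_t object_address = segment_base + 200;

      // Masking the interior pointer recovers the segment base, where the
      // header (and its zone_ back-pointer) is stored.
      assert((object_address & kSegmentAlignmentMask) == segment_base);
      return 0;
    }
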
90 | 314 |
91 void* Zone::New(size_t size) { | 315 void* Zone::New(size_t size) { |
92 // Round up the requested size to fit the alignment. | 316 // Round up the requested size to fit the alignment. |
93 size = RoundUp(size, kAlignment); | 317 size = RoundUp(size, kAlignment); |
94 | 318 |
95 // If the allocation size is divisible by 8 then we return an 8-byte aligned | 319 // If the allocation size is divisible by 8 then we return an 8-byte aligned |
96 // address. | 320 // address. |
97 if (kPointerSize == 4 && kAlignment == 4) { | 321 if (kPointerSize == 4 && kAlignment == 4) { |
98 position_ += ((~size) & 4) & (reinterpret_cast<intptr_t>(position_) & 4); | 322 position_ += ((~size) & 4) & (reinterpret_cast<intptr_t>(position_) & 4); |
99 } else { | 323 } else { |
100 DCHECK(kAlignment >= kPointerSize); | 324 DCHECK(kAlignment >= kPointerSize); |
101 } | 325 } |
102 | 326 |
103 // Check if the requested size is available without expanding. | 327 // Check if the requested size is available without expanding. |
104 Address result = position_; | 328 Address result = position_; |
105 | 329 |
| 330 // In case the requested size is zero, we still want to return a pointer |
| 331 // to a valid segment, so the zone is obtainable from it. |
| 332 if (size == 0) { |
| 333 // There has to be a normal segment to reference. |
| 334 if (segment_head_ == nullptr || segment_head_->is_big_object_segment()) { |
| 335 // We create a segment of minimal size. |
| 336 result = NewNormalSegment(kAlignment); |
| 337 } |
| 338 |
| 339 DCHECK(!GetZoneSegmentFromPointer(result)->is_big_object_segment()); |
| 340 DCHECK_EQ(GetZoneFromPointer(result), this); |
| 341 return reinterpret_cast<void*>(result); |
| 342 } |
| 343 |
| 344 // Large objects are a special case and get their own segment to live in. |
| 345 if (CalculateSegmentSize(size) > kMaximumSegmentSize) { |
| 346 result = NewLargeObjectSegment(size); |
| 347 DCHECK(GetZoneSegmentFromPointer(result)->is_big_object_segment()); |
| 348 allocation_size_ += size; |
| 349 return reinterpret_cast<void*>(result); |
| 350 } |
| 351 |
106 const size_t size_with_redzone = size + kASanRedzoneBytes; | 352 const size_t size_with_redzone = size + kASanRedzoneBytes; |
107 const uintptr_t limit = reinterpret_cast<uintptr_t>(limit_); | 353 const uintptr_t limit = reinterpret_cast<uintptr_t>(limit_); |
108 const uintptr_t position = reinterpret_cast<uintptr_t>(position_); | 354 const uintptr_t position = reinterpret_cast<uintptr_t>(position_); |
109 // position_ > limit_ can be true after the alignment correction above. | 355 // position_ > limit_ can be true after the alignment correction above. |
110 if (limit < position || size_with_redzone > limit - position) { | 356 if (limit < position || size_with_redzone > limit - position) { |
111 result = NewExpand(size_with_redzone); | 357 result = NewNormalSegment(size_with_redzone); |
112 } else { | 358 } else { |
113 position_ += size_with_redzone; | 359 position_ += size_with_redzone; |
114 } | 360 } |
115 | 361 |
116 Address redzone_position = result + size; | 362 Address redzone_position = result + size; |
117 DCHECK(redzone_position + kASanRedzoneBytes == position_); | 363 DCHECK_EQ(redzone_position + kASanRedzoneBytes, position_); |
118 ASAN_POISON_MEMORY_REGION(redzone_position, kASanRedzoneBytes); | 364 ASAN_POISON_MEMORY_REGION(redzone_position, kASanRedzoneBytes); |
119 | 365 |
120 // Check that the result has the proper alignment and return it. | 366 // Check that the result has the proper alignment and return it. |
121 DCHECK(IsAddressAligned(result, kAlignment, 0)); | 367 DCHECK(IsAddressAligned(result, kAlignment, 0)); |
| 368 DCHECK(!GetZoneSegmentFromPointer(result)->is_big_object_segment()); |
| 369 DCHECK_EQ(GetZoneFromPointer(result), this); |
122 allocation_size_ += size; | 370 allocation_size_ += size; |
123 return reinterpret_cast<void*>(result); | 371 return reinterpret_cast<void*>(result); |
124 } | 372 } |
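
The fix-up near the top of Zone::New for 32-bit targets, position_ += ((~size) & 4) & (position_ & 4), advances the cursor by 4 bytes exactly when the already 4-byte-rounded size is a multiple of 8 but the current position is not yet 8-byte aligned. A standalone check of that bit trick (AlignFixup is a hypothetical helper used only here):

    #include <cassert>
    #include <cstdint>

    // Mirrors the kPointerSize == 4 branch: when `size` is divisible by 8,
    // nudge `position` up by 4 so the returned address is 8-byte aligned.
    static uintptr_t AlignFixup(uintptr_t position, uintptr_t size) {
      return position + (((~size) & 4) & (position & 4));
    }

    int main() {
      assert(AlignFixup(4, 8) == 8);    // size % 8 == 0, position only 4-aligned: bump
      assert(AlignFixup(8, 8) == 8);    // already 8-aligned: unchanged
      assert(AlignFixup(4, 4) == 4);    // size % 8 != 0: never bumped
      assert(AlignFixup(12, 20) == 12);
      return 0;
    }
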
125 | 373 |
126 | 374 |
127 void Zone::DeleteAll() { | 375 void Zone::DeleteAll() { |
128 #ifdef DEBUG | |
129 // Constant byte value used for zapping dead memory in debug mode. | |
130 static const unsigned char kZapDeadByte = 0xcd; | |
131 #endif | |
132 | |
133 // Find a segment with a suitable size to keep around. | 376 // Find a segment with a suitable size to keep around. |
134 Segment* keep = nullptr; | 377 Segment* keep = nullptr; |
135 // Traverse the chained list of segments, zapping (in debug mode) | 378 // Traverse the chained list of segments, zapping (in debug mode) |
136 // and freeing every segment except the one we wish to keep. | 379 // and freeing every segment except the one we wish to keep. |
137 for (Segment* current = segment_head_; current;) { | 380 for (Segment* current = segment_head_; current;) { |
138 Segment* next = current->next(); | 381 Segment* next = current->next(); |
139 if (!keep && current->size() <= kMaximumKeptSegmentSize) { | 382 if (!keep && current->size() <= kMaximumKeptSegmentSize) { |
140 // Unlink the segment we wish to keep from the list. | 383 // Unlink the segment we wish to keep from the list. |
141 keep = current; | 384 keep = current; |
142 keep->clear_next(); | 385 keep->Reset(); |
143 } else { | 386 } else { |
144 size_t size = current->size(); | 387 segment_bytes_allocated_ -= current->size(); |
145 #ifdef DEBUG | 388 allocator_->ChangeCurrentMemoryUsage( |
146 // Un-poison first so the zapping doesn't trigger ASan complaints. | 389 -static_cast<int64_t>(current->size())); |
147 ASAN_UNPOISON_MEMORY_REGION(current, size); | 390 |
148 // Zap the entire current segment (including the header). | 391 if (!SegmentPool::AddSegmentToPool(current)) { |
149 memset(current, kZapDeadByte, size); | 392 SegmentPool::PushSegmentToGarbageStack(current); |
150 #endif | 393 } |
151 DeleteSegment(current, size); | |
152 } | 394 } |
153 current = next; | 395 current = next; |
154 } | 396 } |
155 | 397 |
156 // If we have found a segment we want to keep, we must recompute the | 398 // If we have found a segment we want to keep, we must recompute the |
157 // variables 'position' and 'limit' to prepare for future allocate | 399 // variables 'position' and 'limit' to prepare for future allocate |
158 // attempts. Otherwise, we must clear the position and limit to | 400 // attempts. Otherwise, we must clear the position and limit to |
159 // force a new segment to be allocated on demand. | 401 // force a new segment to be allocated on demand. |
160 if (keep) { | 402 if (keep) { |
161 Address start = keep->start(); | 403 Address start = keep->start(); |
162 position_ = RoundUp(start, kAlignment); | 404 position_ = RoundUp(start, kAlignment); |
163 limit_ = keep->end(); | 405 limit_ = keep->end(); |
164 // Un-poison so we can re-use the segment later. | |
165 ASAN_UNPOISON_MEMORY_REGION(start, keep->capacity()); | |
166 #ifdef DEBUG | |
167 // Zap the contents of the kept segment (but not the header). | |
168 memset(start, kZapDeadByte, keep->capacity()); | |
169 #endif | |
170 } else { | 406 } else { |
171 position_ = limit_ = 0; | 407 position_ = limit_ = 0; |
172 } | 408 } |
173 | 409 |
174 allocation_size_ = 0; | 410 allocation_size_ = 0; |
175 // Update the head segment to be the kept segment (if any). | 411 // Update the head segment to be the kept segment (if any). |
176 segment_head_ = keep; | 412 segment_head_ = keep; |
177 } | 413 } |
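
The loop in DeleteAll implements a keep-one policy: the first segment small enough to be worth keeping stays attached to the zone, and every other segment goes back to the pool (or onto the garbage stack when its bucket is full). A minimal sketch of that traversal on a plain singly linked list (Node, KeepOne and the recycle callback are hypothetical names for illustration only):

    #include <cstddef>

    struct Node {
      Node* next;
      size_t size;
    };

    // The first node no larger than `max_kept_size` is retained (with its
    // next pointer cleared); every other node is handed to `recycle`.
    static Node* KeepOne(Node* head, size_t max_kept_size,
                         void (*recycle)(Node*)) {
      Node* keep = nullptr;
      for (Node* current = head; current != nullptr;) {
        Node* next = current->next;
        if (keep == nullptr && current->size <= max_kept_size) {
          keep = current;
          keep->next = nullptr;
        } else {
          recycle(current);
        }
        current = next;
      }
      return keep;
    }

    int main() {
      Node c = {nullptr, 4096};
      Node b = {&c, 65536};
      Node a = {&b, 8192};
      // With an 8 KB threshold, `a` is kept; `b` and `c` are recycled.
      Node* kept = KeepOne(&a, 8192, [](Node*) {});
      return kept == &a ? 0 : 1;
    }
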
178 | 414 |
179 | 415 |
180 void Zone::DeleteKeptSegment() { | 416 void Zone::DeleteKeptSegment() { |
181 #ifdef DEBUG | |
182 // Constant byte value used for zapping dead memory in debug mode. | |
183 static const unsigned char kZapDeadByte = 0xcd; | |
184 #endif | |
185 | |
186 DCHECK(segment_head_ == nullptr || segment_head_->next() == nullptr); | 417 DCHECK(segment_head_ == nullptr || segment_head_->next() == nullptr); |
187 if (segment_head_ != nullptr) { | 418 if (segment_head_ != nullptr) { |
188 size_t size = segment_head_->size(); | 419 segment_bytes_allocated_ -= segment_head_->size(); |
189 #ifdef DEBUG | 420 allocator_->ChangeCurrentMemoryUsage( |
190 // Un-poison first so the zapping doesn't trigger ASan complaints. | 421 -static_cast<int64_t>(segment_head_->size())); |
191 ASAN_UNPOISON_MEMORY_REGION(segment_head_, size); | 422 if (!SegmentPool::AddSegmentToPool(segment_head_)) { |
192 // Zap the entire kept segment (including the header). | 423 SegmentPool::PushSegmentToGarbageStack(segment_head_); |
193 memset(segment_head_, kZapDeadByte, size); | 424 } |
194 #endif | |
195 DeleteSegment(segment_head_, size); | |
196 segment_head_ = nullptr; | |
197 } | 425 } |
198 | 426 |
199 DCHECK(segment_bytes_allocated_ == 0); | 427 DCHECK(segment_bytes_allocated_ == 0); |
200 } | 428 } |
201 | 429 |
202 | 430 |
203 // Creates a new segment, sets it size, and pushes it to the front | |
204 // of the segment chain. Returns the new segment. | |
205 Segment* Zone::NewSegment(size_t size) { | 431 Segment* Zone::NewSegment(size_t size) { |
206 Segment* result = reinterpret_cast<Segment*>(allocator_->Allocate(size)); | 432 Segment* result = SegmentPool::GetSegmentFromPool(size); |
207 segment_bytes_allocated_ += size; | 433 |
208 if (result != nullptr) { | 434 if (!result) { |
209 result->Initialize(segment_head_, size); | 435 v8::base::VirtualMemory vm(size, kSegmentAlignmentSize); |
210 segment_head_ = result; | 436 |
| 437 if (!vm.IsReserved()) { |
| 438 V8::FatalProcessOutOfMemory("Zone"); |
| 439 return nullptr; |
| 440 } |
| 441 |
| 442 // PrintF("%f; +%lu;0\n", static_cast<double>(clock() - begin) / |
| 443 // CLOCKS_PER_SEC, size); |
| 444 |
| 445 Address base = Address(reinterpret_cast<uintptr_t>(vm.address()) & |
| 446 kSegmentAlignmentMask); |
| 447 |
| 448 // On Windows, VirtualMemory can fail to allocate aligned memory. |
| 449 if (base != vm.address()) { |
| 450 // Address is not aligned. |
| 451 base += kSegmentAlignmentSize; |
| 452 } |
| 453 |
| 454 // The address of the end of the virtual memory reservation. |
| 455 Address end = |
| 456 Address(reinterpret_cast<uintptr_t>(vm.address()) + vm.size()); |
| 457 |
| 458 // Check whether the virtual memory is big enough to fit our aligned chunk. |
| 459 DCHECK_LE(base + size, end); |
| 460 |
| 461 // In case the virtual memory is too big, we want to use as much of it as |
| 462 // possible. For normal segments, the segment alignment size is the upper |
| 463 // limit. |
| 464 if (size <= kSegmentAlignmentSize) { |
| 465 size = Min(static_cast<size_t>(end - base), kSegmentAlignmentSize); |
| 466 } |
| 467 |
| 468 if (!v8::base::VirtualMemory::CommitRegion(reinterpret_cast<void*>(base), |
| 469 size, false)) { |
| 470 V8::FatalProcessOutOfMemory("Zone"); |
| 471 return nullptr; |
| 472 } |
| 473 |
| 474 result = reinterpret_cast<Segment*>(base); |
| 475 result->Initialize(this, &vm, size); |
| 476 } else { |
| 477 result->set_zone(this); |
211 } | 478 } |
| 479 |
| 480 segment_bytes_allocated_ += result->size(); |
| 481 allocator_->ChangeCurrentMemoryUsage(result->size()); |
| 482 |
212 return result; | 483 return result; |
213 } | 484 } |
214 | 485 |
| 486 Address Zone::NewLargeObjectSegment(size_t size) { |
| 487 size_t new_size = CalculateSegmentSize(size); |
| 488 Segment* segment = NewSegment(new_size); |
215 | 489 |
216 // Deletes the given segment. Does not touch the segment chain. | 490 if (segment_head_ == nullptr) { |
217 void Zone::DeleteSegment(Segment* segment, size_t size) { | 491 // This is the only case in which a large object segment becomes head of |
218 segment_bytes_allocated_ -= size; | 492 // the segment list. |
219 allocator_->Free(segment, size); | 493 segment_head_ = segment; |
| 494 } else { |
| 495 // Large object segments should be inserted second into the list when |
| 496 // possible. |
| 497 segment->set_next(segment_head_->next()); |
| 498 segment_head_->set_next(segment); |
| 499 } |
| 500 |
| 501 Address result = RoundUp(segment->start(), kAlignment); |
| 502 DCHECK_EQ(GetZoneFromPointer(segment), this); |
| 503 DCHECK_EQ(GetZoneFromPointer(result), this); |
| 504 return result; |
220 } | 505 } |
221 | 506 |
222 | 507 Address Zone::NewNormalSegment(size_t size) { |
223 Address Zone::NewExpand(size_t size) { | |
224 // Make sure the requested size is already properly aligned and that | 508 // Make sure the requested size is already properly aligned and that |
225 // there isn't enough room in the Zone to satisfy the request. | 509 // there isn't enough room in the Zone to satisfy the request. |
226 DCHECK_EQ(size, RoundDown(size, kAlignment)); | 510 DCHECK_EQ(size, RoundDown(size, kAlignment)); |
227 DCHECK(limit_ < position_ || | 511 DCHECK(limit_ < position_ || |
228 reinterpret_cast<uintptr_t>(limit_) - | 512 reinterpret_cast<uintptr_t>(limit_) - |
229 reinterpret_cast<uintptr_t>(position_) < | 513 reinterpret_cast<uintptr_t>(position_) < |
230 size); | 514 size); |
231 | 515 |
232 // Compute the new segment size. We use a 'high water mark' | 516 DCHECK_LE(size, kMaximumSegmentSize + 0); |
233 // strategy, where we increase the segment size every time we expand | 517 |
234 // except that we employ a maximum segment size when we delete. This | 518 size_t new_size = CalculateSegmentSize(size); |
235 // is to avoid excessive malloc() and free() overhead. | 519 const size_t old_size = |
236 Segment* head = segment_head_; | 520 (segment_head_ == nullptr) ? 0 : segment_head_->size(); |
237 const size_t old_size = (head == nullptr) ? 0 : head->size(); | 521 new_size = Max(new_size, old_size << 1); |
238 static const size_t kSegmentOverhead = sizeof(Segment) + kAlignment; | 522 new_size = Min(new_size, kMaximumSegmentSize); |
239 const size_t new_size_no_overhead = size + (old_size << 1); | 523 |
240 size_t new_size = kSegmentOverhead + new_size_no_overhead; | 524 DCHECK_LE(new_size, kMaximumSegmentSize + 0); |
241 const size_t min_new_size = kSegmentOverhead + size; | 525 |
242 // Guard against integer overflow. | |
243 if (new_size_no_overhead < size || new_size < kSegmentOverhead) { | |
244 V8::FatalProcessOutOfMemory("Zone"); | |
245 return nullptr; | |
246 } | |
247 if (new_size < kMinimumSegmentSize) { | |
248 new_size = kMinimumSegmentSize; | |
249 } else if (new_size > kMaximumSegmentSize) { | |
250 // Limit the size of new segments to avoid growing the segment size | |
251 // exponentially, thus putting pressure on contiguous virtual address space. | |
252 // All the while making sure to allocate a segment large enough to hold the | |
253 // requested size. | |
254 new_size = Max(min_new_size, kMaximumSegmentSize); | |
255 } | |
256 if (new_size > INT_MAX) { | |
257 V8::FatalProcessOutOfMemory("Zone"); | |
258 return nullptr; | |
259 } | |
260 Segment* segment = NewSegment(new_size); | 526 Segment* segment = NewSegment(new_size); |
261 if (segment == nullptr) { | 527 |
262 V8::FatalProcessOutOfMemory("Zone"); | 528 // Put segment in front of the segment list. |
263 return nullptr; | 529 segment->set_next(segment_head_); |
264 } | 530 segment_head_ = segment; |
| 531 |
| 532 // Normal segments must not be bigger than the alignment size. |
| 533 DCHECK_LE(segment->size(), kSegmentAlignmentSize + 0); |
265 | 534 |
266 // Recompute 'top' and 'limit' based on the new segment. | 535 // Recompute 'top' and 'limit' based on the new segment. |
267 Address result = RoundUp(segment->start(), kAlignment); | 536 Address result = RoundUp(segment->start(), kAlignment); |
268 position_ = result + size; | 537 position_ = result + size; |
269 // Check for address overflow. | 538 // Check for address overflow. |
270 // (Should not happen since the segment is guaranteed to accommodate | 539 // (Should not happen since the segment is guaranteed to accommodate |
271 // size bytes + header and alignment padding) | 540 // size bytes + header and alignment padding) |
272 DCHECK(reinterpret_cast<uintptr_t>(position_) >= | 541 DCHECK_GE(reinterpret_cast<uintptr_t>(position_), |
273 reinterpret_cast<uintptr_t>(result)); | 542 reinterpret_cast<uintptr_t>(result)); |
| 543 DCHECK_EQ(GetZoneFromPointer(segment), this); |
| 544 DCHECK_EQ(GetZoneFromPointer(result), this); |
| 545 DCHECK_EQ(GetZoneFromPointer(segment->end() - 1), this); |
274 limit_ = segment->end(); | 546 limit_ = segment->end(); |
275 DCHECK(position_ <= limit_); | 547 DCHECK(position_ <= limit_); |
276 return result; | 548 return result; |
277 } | 549 } |
278 | 550 |
| 551 size_t Zone::CalculateSegmentSize(const size_t requested) { |
| 552 if (UINTPTR_MAX - (sizeof(Segment) + kAlignment) < requested) { |
| 553 V8::FatalProcessOutOfMemory("Zone"); |
| 554 } |
| 555 |
| 556 return RoundUp(requested + sizeof(Segment) + kAlignment, kMinimumSegmentSize); |
| 557 } |
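
CalculateSegmentSize adds room for the Segment header plus alignment slack and rounds the total up to a multiple of kMinimumSegmentSize. A worked standalone example (the 8 KB minimum, 8-byte alignment and 64-byte header size are stand-in values; the real ones come from zone.h and sizeof(Segment)):

    #include <cassert>
    #include <cstddef>

    // Stand-in values for illustration only.
    static const size_t kAlignment = 8;
    static const size_t kMinimumSegmentSize = 8 * 1024;
    static const size_t kSegmentHeaderSize = 64;  // plays the role of sizeof(Segment)

    static size_t RoundUpTo(size_t value, size_t multiple) {
      return ((value + multiple - 1) / multiple) * multiple;
    }

    // Mirrors Zone::CalculateSegmentSize under the stand-in constants.
    static size_t CalculateSegmentSize(size_t requested) {
      return RoundUpTo(requested + kSegmentHeaderSize + kAlignment,
                       kMinimumSegmentSize);
    }

    int main() {
      assert(CalculateSegmentSize(1) == 8 * 1024);          // tiny request: one minimum-size segment
      assert(CalculateSegmentSize(8 * 1024) == 16 * 1024);  // header overhead pushes it to the next multiple
      return 0;
    }
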
| 558 |
279 } // namespace internal | 559 } // namespace internal |
280 } // namespace v8 | 560 } // namespace v8 |