OLD | NEW |
1 // Copyright 2012 the V8 project authors. All rights reserved. | 1 // Copyright 2012 the V8 project authors. All rights reserved. |
2 // Use of this source code is governed by a BSD-style license that can be | 2 // Use of this source code is governed by a BSD-style license that can be |
3 // found in the LICENSE file. | 3 // found in the LICENSE file. |
4 | 4 |
5 #ifndef V8_HEAP_HEAP_H_ | 5 #ifndef V8_HEAP_HEAP_H_ |
6 #define V8_HEAP_HEAP_H_ | 6 #define V8_HEAP_HEAP_H_ |
7 | 7 |
8 #include <cmath> | 8 #include <cmath> |
9 #include <map> | 9 #include <map> |
10 | 10 |
(...skipping 308 matching lines...) |
319 return Page::FromAllocationTop(reinterpret_cast<Address>(rear_)); | 319 return Page::FromAllocationTop(reinterpret_cast<Address>(rear_)); |
320 } | 320 } |
321 | 321 |
322 void SetNewLimit(Address limit) { | 322 void SetNewLimit(Address limit) { |
323 // If we are already using an emergency stack, we can ignore it. | 323 // If we are already using an emergency stack, we can ignore it. |
324 if (emergency_stack_) return; | 324 if (emergency_stack_) return; |
325 | 325 |
326 // If the limit is not on the same page, we can ignore it. | 326 // If the limit is not on the same page, we can ignore it. |
327 if (Page::FromAllocationTop(limit) != GetHeadPage()) return; | 327 if (Page::FromAllocationTop(limit) != GetHeadPage()) return; |
328 | 328 |
329 limit_ = reinterpret_cast<intptr_t*>(limit); | 329 limit_ = reinterpret_cast<struct Entry*>(limit); |
330 | 330 |
331 if (limit_ <= rear_) { | 331 if (limit_ <= rear_) { |
332 return; | 332 return; |
333 } | 333 } |
334 | 334 |
335 RelocateQueueHead(); | 335 RelocateQueueHead(); |
336 } | 336 } |
337 | 337 |
338 bool IsBelowPromotionQueue(Address to_space_top) { | 338 bool IsBelowPromotionQueue(Address to_space_top) { |
339 // If an emergency stack is used, the to-space address cannot interfere | 339 // If an emergency stack is used, the to-space address cannot interfere |
340 // with the promotion queue. | 340 // with the promotion queue. |
341 if (emergency_stack_) return true; | 341 if (emergency_stack_) return true; |
342 | 342 |
343 // If the given to-space top pointer and the head of the promotion queue | 343 // If the given to-space top pointer and the head of the promotion queue |
344 // are not on the same page, then the to-space objects are below the | 344 // are not on the same page, then the to-space objects are below the |
345 // promotion queue. | 345 // promotion queue. |
346 if (GetHeadPage() != Page::FromAddress(to_space_top)) { | 346 if (GetHeadPage() != Page::FromAddress(to_space_top)) { |
347 return true; | 347 return true; |
348 } | 348 } |
349 // If the to-space top pointer is less than or equal to the promotion | 349 // If the to-space top pointer is less than or equal to the promotion |
350 // queue head, then the to-space objects are below the promotion queue. | 350 // queue head, then the to-space objects are below the promotion queue. |
351 return reinterpret_cast<intptr_t*>(to_space_top) <= rear_; | 351 return reinterpret_cast<struct Entry*>(to_space_top) <= rear_; |
352 } | 352 } |
353 | 353 |
354 bool is_empty() { | 354 bool is_empty() { |
355 return (front_ == rear_) && | 355 return (front_ == rear_) && |
356 (emergency_stack_ == NULL || emergency_stack_->length() == 0); | 356 (emergency_stack_ == NULL || emergency_stack_->length() == 0); |
357 } | 357 } |
358 | 358 |
359 inline void insert(HeapObject* target, int size); | 359 inline void insert(HeapObject* target, intptr_t size); |
360 | 360 |
361 void remove(HeapObject** target, int* size) { | 361 void remove(HeapObject** target, intptr_t* size) { |
362 DCHECK(!is_empty()); | 362 DCHECK(!is_empty()); |
363 if (front_ == rear_) { | 363 if (front_ == rear_) { |
364 Entry e = emergency_stack_->RemoveLast(); | 364 Entry e = emergency_stack_->RemoveLast(); |
365 *target = e.obj_; | 365 *target = e.obj_; |
366 *size = e.size_; | 366 *size = e.size_; |
367 return; | 367 return; |
368 } | 368 } |
369 | 369 |
370 *target = reinterpret_cast<HeapObject*>(*(--front_)); | 370 struct Entry* entry = reinterpret_cast<struct Entry*>(--front_); |
371 *size = static_cast<int>(*(--front_)); | 371 *target = entry->obj_; |
| 372 *size = entry->size_; |
| 373 |
372 // Assert no underflow. | 374 // Assert no underflow. |
373 SemiSpace::AssertValidRange(reinterpret_cast<Address>(rear_), | 375 SemiSpace::AssertValidRange(reinterpret_cast<Address>(rear_), |
374 reinterpret_cast<Address>(front_)); | 376 reinterpret_cast<Address>(front_)); |
375 } | 377 } |
376 | 378 |
377 private: | 379 private: |
378 // The front of the queue is higher in the memory page chain than the rear. | |
379 intptr_t* front_; | |
380 intptr_t* rear_; | |
381 intptr_t* limit_; | |
382 | |
383 static const int kEntrySizeInWords = 2; | 380 static const int kEntrySizeInWords = 2; |
384 | 381 |
385 struct Entry { | 382 struct Entry { |
386 Entry(HeapObject* obj, int size) : obj_(obj), size_(size) {} | 383 Entry(HeapObject* obj, intptr_t size) : obj_(obj), size_(size) {} |
387 | 384 |
388 HeapObject* obj_; | 385 HeapObject* obj_; |
389 int size_; | 386 intptr_t size_; |
390 }; | 387 }; |
| 388 |
| 389 // The front of the queue is higher in the memory page chain than the rear. |
| 390 struct Entry* front_; |
| 391 struct Entry* rear_; |
| 392 struct Entry* limit_; |
| 393 |
391 List<Entry>* emergency_stack_; | 394 List<Entry>* emergency_stack_; |
392 | 395 |
393 Heap* heap_; | 396 Heap* heap_; |
394 | 397 |
395 void RelocateQueueHead(); | 398 void RelocateQueueHead(); |
396 | 399 |
| 400 STATIC_ASSERT(sizeof(struct Entry) == kEntrySizeInWords * kPointerSize); |
| 401 |
397 DISALLOW_COPY_AND_ASSIGN(PromotionQueue); | 402 DISALLOW_COPY_AND_ASSIGN(PromotionQueue); |
398 }; | 403 }; |
399 | 404 |
400 | 405 |
401 enum ArrayStorageAllocationMode { | 406 enum ArrayStorageAllocationMode { |
402 DONT_INITIALIZE_ARRAY_ELEMENTS, | 407 DONT_INITIALIZE_ARRAY_ELEMENTS, |
403 INITIALIZE_ARRAY_ELEMENTS_WITH_HOLE | 408 INITIALIZE_ARRAY_ELEMENTS_WITH_HOLE |
404 }; | 409 }; |
405 | 410 |
406 enum class ClearRecordedSlots { kYes, kNo }; | 411 enum class ClearRecordedSlots { kYes, kNo }; |
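
Note on the layout change above: the promotion queue still sits at the high end of a to-space page and grows downwards, but front_, rear_ and limit_ now step in whole Entry records (two words, pinned by the new STATIC_ASSERT) rather than in pairs of raw intptr_t words. A minimal standalone sketch of that layout, assuming only what is visible in this hunk (Object, EntryQueue and the plain backing buffer are placeholders, not V8 names; the real insert() is defined in heap-inl.h and is not shown here):

#include <assert.h>
#include <stdint.h>
#include <new>

struct Object;  // stands in for v8::internal::HeapObject

class EntryQueue {
 public:
  struct Entry {
    Entry(Object* obj, intptr_t size) : obj_(obj), size_(size) {}
    Object* obj_;
    intptr_t size_;
  };
  static_assert(sizeof(Entry) == 2 * sizeof(void*),
                "an entry stays two words, matching kEntrySizeInWords");

  // |high_end| is the exclusive top of the backing store (suitably aligned);
  // both cursors start there, so the queue is initially empty.
  explicit EntryQueue(void* high_end)
      : front_(static_cast<Entry*>(high_end)), rear_(front_) {}

  bool is_empty() const { return front_ == rear_; }

  // Push: step rear_ down by one whole Entry and construct in place
  // (the old code wrote two raw intptr_t words here instead).
  void insert(Object* target, intptr_t size) {
    new (--rear_) Entry(target, size);
  }

  // Pop the oldest entry: step front_ down by one whole Entry and read its
  // fields, mirroring the new remove() in the hunk above.
  void remove(Object** target, intptr_t* size) {
    assert(!is_empty());
    Entry* entry = --front_;
    *target = entry->obj_;
    *size = entry->size_;
  }

 private:
  // front_ trails at the high end (oldest entries), rear_ leads at the low
  // end (newest entries); front_ == rear_ means empty.
  Entry* front_;
  Entry* rear_;
};

// Usage with a plain buffer standing in for the unused top of a to-space page:
//   alignas(alignof(EntryQueue::Entry)) static char backing[64 * sizeof(void*)];
//   EntryQueue q(backing + sizeof(backing));
//   q.insert(some_object, 16);  ...  q.remove(&some_object, &size);

Because both cursors now move in whole-Entry steps, the STATIC_ASSERT pins an Entry to exactly the two words the old word-based code consumed per object, which is what the surrounding page arithmetic in SetNewLimit(), IsBelowPromotionQueue() and RelocateQueueHead() continues to assume.
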
(...skipping 2218 matching lines...) |
2625 friend class LargeObjectSpace; | 2630 friend class LargeObjectSpace; |
2626 friend class NewSpace; | 2631 friend class NewSpace; |
2627 friend class PagedSpace; | 2632 friend class PagedSpace; |
2628 DISALLOW_COPY_AND_ASSIGN(AllocationObserver); | 2633 DISALLOW_COPY_AND_ASSIGN(AllocationObserver); |
2629 }; | 2634 }; |
2630 | 2635 |
2631 } // namespace internal | 2636 } // namespace internal |
2632 } // namespace v8 | 2637 } // namespace v8 |
2633 | 2638 |
2634 #endif // V8_HEAP_HEAP_H_ | 2639 #endif // V8_HEAP_HEAP_H_ |