OLD | NEW |
1 // Copyright 2012 the V8 project authors. All rights reserved. | 1 // Copyright 2012 the V8 project authors. All rights reserved. |
2 // Use of this source code is governed by a BSD-style license that can be | 2 // Use of this source code is governed by a BSD-style license that can be |
3 // found in the LICENSE file. | 3 // found in the LICENSE file. |
4 | 4 |
5 #ifndef V8_HEAP_HEAP_H_ | 5 #ifndef V8_HEAP_HEAP_H_ |
6 #define V8_HEAP_HEAP_H_ | 6 #define V8_HEAP_HEAP_H_ |
7 | 7 |
8 #include <cmath> | 8 #include <cmath> |
9 | 9 |
10 #include "src/allocation.h" | 10 #include "src/allocation.h" |
(...skipping 384 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... |
395 // store buffer overflows again we can exempt the page from the store buffer | 395 // store buffer overflows again we can exempt the page from the store buffer |
396 // by rewinding to this point instead of having to search the store buffer. | 396 // by rewinding to this point instead of having to search the store buffer. |
397 Object*** start_of_current_page_; | 397 Object*** start_of_current_page_; |
398 // The current page we are scanning in the store buffer iterator. | 398 // The current page we are scanning in the store buffer iterator. |
399 MemoryChunk* current_page_; | 399 MemoryChunk* current_page_; |
400 }; | 400 }; |
401 | 401 |
402 | 402 |
403 // A queue of objects promoted during scavenge. Each object is accompanied | 403 // A queue of objects promoted during scavenge. Each object is accompanied |
404 // by its size to avoid dereferencing a map pointer for scanning. | 404 // by its size to avoid dereferencing a map pointer for scanning. |
| 405 // The last page in to-space is used for the promotion queue. On conflict |
| 406 // during scavenge, the promotion queue is allocated externally and all |
| 407 // entries are copied to the external queue. |
405 class PromotionQueue { | 408 class PromotionQueue { |
406 public: | 409 public: |
407 explicit PromotionQueue(Heap* heap) | 410 explicit PromotionQueue(Heap* heap) |
408 : front_(NULL), | 411 : front_(NULL), |
409 rear_(NULL), | 412 rear_(NULL), |
410 limit_(NULL), | 413 limit_(NULL), |
411 emergency_stack_(0), | 414 emergency_stack_(0), |
412 heap_(heap) {} | 415 heap_(heap) {} |
413 | 416 |
414 void Initialize(); | 417 void Initialize(); |
415 | 418 |
416 void Destroy() { | 419 void Destroy() { |
417 DCHECK(is_empty()); | 420 DCHECK(is_empty()); |
418 delete emergency_stack_; | 421 delete emergency_stack_; |
419 emergency_stack_ = NULL; | 422 emergency_stack_ = NULL; |
420 } | 423 } |
421 | 424 |
422 Page* GetHeadPage() { | 425 Page* GetHeadPage() { |
423 return Page::FromAllocationTop(reinterpret_cast<Address>(rear_)); | 426 return Page::FromAllocationTop(reinterpret_cast<Address>(rear_)); |
424 } | 427 } |
425 | 428 |
426 void SetNewLimit(Address limit) { | 429 void SetNewLimit(Address limit) { |
| 430 // If we are already using an emergency stack, we can ignore the new limit. |
| 431 if (emergency_stack_) return; |
| 432 |
| 433 // If the new limit is not on the same page as the queue head, we can ignore it. |
| 434 if (Page::FromAllocationTop(limit) != GetHeadPage()) return; |
| 435 |
427 limit_ = reinterpret_cast<intptr_t*>(limit); | 436 limit_ = reinterpret_cast<intptr_t*>(limit); |
428 | 437 |
429 if (limit_ <= rear_) { | 438 if (limit_ <= rear_) { |
430 return; | 439 return; |
431 } | 440 } |
432 | 441 |
433 RelocateQueueHead(); | 442 RelocateQueueHead(); |
434 } | 443 } |
435 | 444 |
436 bool IsBelowPromotionQueue(Address to_space_top) { | 445 bool IsBelowPromotionQueue(Address to_space_top) { |
| 446 // If an emergency stack is used, the to-space address cannot interfere |
| 447 // with the promotion queue. |
| 448 if (emergency_stack_) return true; |
| 449 |
437 // If the given to-space top pointer and the head of the promotion queue | 450 // If the given to-space top pointer and the head of the promotion queue |
438 // are not on the same page, then the to-space objects are below the | 451 // are not on the same page, then the to-space objects are below the |
439 // promotion queue. | 452 // promotion queue. |
440 if (GetHeadPage() != Page::FromAddress(to_space_top)) { | 453 if (GetHeadPage() != Page::FromAddress(to_space_top)) { |
441 return true; | 454 return true; |
442 } | 455 } |
443 // If the to-space top pointer is less than or equal to the promotion | 456 // If the to-space top pointer is less than or equal to the promotion |
444 // queue head, then the to-space objects are below the promotion queue. | 457 // queue head, then the to-space objects are below the promotion queue. |
445 return reinterpret_cast<intptr_t*>(to_space_top) <= rear_; | 458 return reinterpret_cast<intptr_t*>(to_space_top) <= rear_; |
446 } | 459 } |
447 | 460 |
448 bool is_empty() { | 461 bool is_empty() { |
449 return (front_ == rear_) && | 462 return (front_ == rear_) && |
450 (emergency_stack_ == NULL || emergency_stack_->length() == 0); | 463 (emergency_stack_ == NULL || emergency_stack_->length() == 0); |
451 } | 464 } |
452 | 465 |
453 inline void insert(HeapObject* target, int size); | 466 inline void insert(HeapObject* target, int size); |
454 | 467 |
455 void remove(HeapObject** target, int* size) { | 468 void remove(HeapObject** target, int* size) { |
456 DCHECK(!is_empty()); | 469 DCHECK(!is_empty()); |
457 if (front_ == rear_) { | 470 if (front_ == rear_) { |
458 Entry e = emergency_stack_->RemoveLast(); | 471 Entry e = emergency_stack_->RemoveLast(); |
459 *target = e.obj_; | 472 *target = e.obj_; |
460 *size = e.size_; | 473 *size = e.size_; |
461 return; | 474 return; |
462 } | 475 } |
463 | 476 |
464 if (NewSpacePage::IsAtStart(reinterpret_cast<Address>(front_))) { | |
465 NewSpacePage* front_page = | |
466 NewSpacePage::FromAddress(reinterpret_cast<Address>(front_)); | |
467 DCHECK(!front_page->prev_page()->is_anchor()); | |
468 front_ = reinterpret_cast<intptr_t*>(front_page->prev_page()->area_end()); | |
469 } | |
470 *target = reinterpret_cast<HeapObject*>(*(--front_)); | 477 *target = reinterpret_cast<HeapObject*>(*(--front_)); |
471 *size = static_cast<int>(*(--front_)); | 478 *size = static_cast<int>(*(--front_)); |
472 // Assert no underflow. | 479 // Assert no underflow. |
473 SemiSpace::AssertValidRange(reinterpret_cast<Address>(rear_), | 480 SemiSpace::AssertValidRange(reinterpret_cast<Address>(rear_), |
474 reinterpret_cast<Address>(front_)); | 481 reinterpret_cast<Address>(front_)); |
475 } | 482 } |
476 | 483 |
477 private: | 484 private: |
478 // The front of the queue is higher in the memory page chain than the rear. | 485 // The front of the queue is higher in the memory page chain than the rear. |
479 intptr_t* front_; | 486 intptr_t* front_; |
(...skipping 2114 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... |
2594 DisallowHeapAllocation no_allocation; // i.e. no gc allowed. | 2601 DisallowHeapAllocation no_allocation; // i.e. no gc allowed. |
2595 | 2602 |
2596 private: | 2603 private: |
2597 DISALLOW_IMPLICIT_CONSTRUCTORS(PathTracer); | 2604 DISALLOW_IMPLICIT_CONSTRUCTORS(PathTracer); |
2598 }; | 2605 }; |
2599 #endif // DEBUG | 2606 #endif // DEBUG |
2600 } | 2607 } |
2601 } // namespace v8::internal | 2608 } // namespace v8::internal |
2602 | 2609 |
2603 #endif // V8_HEAP_HEAP_H_ | 2610 #endif // V8_HEAP_HEAP_H_ |
OLD | NEW |