OLD | NEW |
1 // Copyright 2011 the V8 project authors. All rights reserved. | 1 // Copyright 2011 the V8 project authors. All rights reserved. |
2 // Use of this source code is governed by a BSD-style license that can be | 2 // Use of this source code is governed by a BSD-style license that can be |
3 // found in the LICENSE file. | 3 // found in the LICENSE file. |
4 | 4 |
5 #ifndef V8_HEAP_SPACES_H_ | 5 #ifndef V8_HEAP_SPACES_H_ |
6 #define V8_HEAP_SPACES_H_ | 6 #define V8_HEAP_SPACES_H_ |
7 | 7 |
8 #include "src/allocation.h" | 8 #include "src/allocation.h" |
9 #include "src/atomic-utils.h" | 9 #include "src/atomic-utils.h" |
10 #include "src/base/atomicops.h" | 10 #include "src/base/atomicops.h" |
(...skipping 380 matching lines...) |
391 + kPointerSize // Address owner_ | 391 + kPointerSize // Address owner_ |
392 + kPointerSize // Heap* heap_ | 392 + kPointerSize // Heap* heap_ |
393 + kIntSize; // int progress_bar_ | 393 + kIntSize; // int progress_bar_ |
394 | 394 |
395 static const size_t kSlotsBufferOffset = | 395 static const size_t kSlotsBufferOffset = |
396 kLiveBytesOffset + kIntSize; // int live_byte_count_ | 396 kLiveBytesOffset + kIntSize; // int live_byte_count_ |
397 | 397 |
398 static const size_t kWriteBarrierCounterOffset = | 398 static const size_t kWriteBarrierCounterOffset = |
399 kSlotsBufferOffset + kPointerSize // SlotsBuffer* slots_buffer_; | 399 kSlotsBufferOffset + kPointerSize // SlotsBuffer* slots_buffer_; |
400 + kPointerSize // SlotSet* old_to_new_slots_; | 400 + kPointerSize // SlotSet* old_to_new_slots_; |
| 401 + kPointerSize // SlotSet* old_to_old_slots_; |
401 + kPointerSize; // SkipList* skip_list_; | 402 + kPointerSize; // SkipList* skip_list_; |
402 | 403 |
403 static const size_t kMinHeaderSize = | 404 static const size_t kMinHeaderSize = |
404 kWriteBarrierCounterOffset + | 405 kWriteBarrierCounterOffset + |
405 kIntptrSize // intptr_t write_barrier_counter_ | 406 kIntptrSize // intptr_t write_barrier_counter_ |
406 + kPointerSize // AtomicValue high_water_mark_ | 407 + kPointerSize // AtomicValue high_water_mark_ |
407 + kPointerSize // base::Mutex* mutex_ | 408 + kPointerSize // base::Mutex* mutex_ |
408 + kPointerSize // base::AtomicWord parallel_sweeping_ | 409 + kPointerSize // base::AtomicWord parallel_sweeping_ |
409 + kPointerSize // AtomicValue parallel_compaction_ | 410 + kPointerSize // AtomicValue parallel_compaction_ |
410 + 5 * kPointerSize // AtomicNumber free-list statistics | 411 + 5 * kPointerSize // AtomicNumber free-list statistics |
(...skipping 19 matching lines...) |
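Note on the hunk above: each offset constant is a running sum over the fields that precede it, so inserting the new SlotSet* old_to_old_slots_ pointer shifts every later offset by one word. A standalone sketch of that arithmetic, with kPointerSize/kIntSize assumed for a 64-bit target and a placeholder kLiveBytesOffset (the real value is computed in the elided lines):

#include <cstddef>

constexpr size_t kPointerSize = 8;       // assumed: 64-bit target
constexpr size_t kIntSize = 4;
constexpr size_t kLiveBytesOffset = 44;  // placeholder, not the real value

constexpr size_t kSlotsBufferOffset =
    kLiveBytesOffset + kIntSize;  // int live_byte_count_

// Old layout: slots_buffer_, old_to_new_slots_, skip_list_ (3 words).
constexpr size_t kOldWriteBarrierCounterOffset =
    kSlotsBufferOffset + 3 * kPointerSize;

// New layout also has SlotSet* old_to_old_slots_ (4 words).
constexpr size_t kNewWriteBarrierCounterOffset =
    kSlotsBufferOffset + 4 * kPointerSize;

static_assert(kNewWriteBarrierCounterOffset ==
                  kOldWriteBarrierCounterOffset + kPointerSize,
              "the extra SlotSet* pointer widens the header by one word");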
430 static const int kFlagsOffset = kPointerSize; | 431 static const int kFlagsOffset = kPointerSize; |
431 | 432 |
432 static inline void IncrementLiveBytesFromMutator(HeapObject* object, int by); | 433 static inline void IncrementLiveBytesFromMutator(HeapObject* object, int by); |
433 static inline void IncrementLiveBytesFromGC(HeapObject* object, int by); | 434 static inline void IncrementLiveBytesFromGC(HeapObject* object, int by); |
434 | 435 |
435 // Only works if the pointer is in the first kPageSize of the MemoryChunk. | 436 // Only works if the pointer is in the first kPageSize of the MemoryChunk. |
436 static MemoryChunk* FromAddress(Address a) { | 437 static MemoryChunk* FromAddress(Address a) { |
437 return reinterpret_cast<MemoryChunk*>(OffsetFrom(a) & ~kAlignmentMask); | 438 return reinterpret_cast<MemoryChunk*>(OffsetFrom(a) & ~kAlignmentMask); |
438 } | 439 } |
439 | 440 |
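FromAddress works because chunks are allocated at kAlignment-aligned bases, so clearing the low bits of any interior address recovers the chunk header. A minimal sketch, assuming a 1 MB alignment (the real kAlignment is defined in the elided lines above):

#include <cassert>
#include <cstdint>

int main() {
  const uintptr_t kAlignment = 1u << 20;      // assumed 1 MB chunk alignment
  const uintptr_t kAlignmentMask = kAlignment - 1;

  uintptr_t chunk_base = 5 * kAlignment;      // some aligned chunk base
  uintptr_t interior = chunk_base + 0x1234;   // address inside the chunk

  // Masking off the low bits maps the interior address back to the base.
  assert((interior & ~kAlignmentMask) == chunk_base);
  return 0;
}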
440 // Only works for addresses in pointer spaces, not data or code spaces. | |
441 static inline MemoryChunk* FromAnyPointerAddress(Heap* heap, Address addr); | 441 static inline MemoryChunk* FromAnyPointerAddress(Heap* heap, Address addr); |
442 | 442 |
443 static inline void UpdateHighWaterMark(Address mark) { | 443 static inline void UpdateHighWaterMark(Address mark) { |
444 if (mark == nullptr) return; | 444 if (mark == nullptr) return; |
445 // Need to subtract one from the mark because when a chunk is full the | 445 // Need to subtract one from the mark because when a chunk is full the |
446 // top points to the next address after the chunk, which effectively belongs | 446 // top points to the next address after the chunk, which effectively belongs |
447 // to another chunk. See the comment to Page::FromAllocationTop. | 447 // to another chunk. See the comment to Page::FromAllocationTop. |
448 MemoryChunk* chunk = MemoryChunk::FromAddress(mark - 1); | 448 MemoryChunk* chunk = MemoryChunk::FromAddress(mark - 1); |
449 intptr_t new_mark = static_cast<intptr_t>(mark - chunk->address()); | 449 intptr_t new_mark = static_cast<intptr_t>(mark - chunk->address()); |
450 intptr_t old_mark = 0; | 450 intptr_t old_mark = 0; |
(...skipping 56 matching lines...) |
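The body of UpdateHighWaterMark is cut off above, but the old_mark local suggests the usual monotonic-max compare-and-swap retry loop. A minimal standalone sketch of that pattern, using std::atomic rather than V8's AtomicValue:

#include <atomic>
#include <cstdint>

// Raise high_water_mark to new_mark, but never lower it, even when other
// threads race on the same counter.
void UpdateMax(std::atomic<intptr_t>& high_water_mark, intptr_t new_mark) {
  intptr_t old_mark = high_water_mark.load(std::memory_order_relaxed);
  while (new_mark > old_mark &&
         !high_water_mark.compare_exchange_weak(old_mark, new_mark)) {
    // compare_exchange_weak refreshed old_mark; re-test and retry.
  }
}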
507 | 507 |
508 inline SkipList* skip_list() { return skip_list_; } | 508 inline SkipList* skip_list() { return skip_list_; } |
509 | 509 |
510 inline void set_skip_list(SkipList* skip_list) { skip_list_ = skip_list; } | 510 inline void set_skip_list(SkipList* skip_list) { skip_list_ = skip_list; } |
511 | 511 |
512 inline SlotsBuffer* slots_buffer() { return slots_buffer_; } | 512 inline SlotsBuffer* slots_buffer() { return slots_buffer_; } |
513 | 513 |
514 inline SlotsBuffer** slots_buffer_address() { return &slots_buffer_; } | 514 inline SlotsBuffer** slots_buffer_address() { return &slots_buffer_; } |
515 | 515 |
516 inline SlotSet* old_to_new_slots() { return old_to_new_slots_; } | 516 inline SlotSet* old_to_new_slots() { return old_to_new_slots_; } |
| 517 inline SlotSet* old_to_old_slots() { return old_to_old_slots_; } |
517 | 518 |
518 void AllocateOldToNewSlots(); | 519 void AllocateOldToNewSlots(); |
519 void ReleaseOldToNewSlots(); | 520 void ReleaseOldToNewSlots(); |
| 521 void AllocateOldToOldSlots(); |
| 522 void ReleaseOldToOldSlots(); |
| 523 |
520 | 524 |
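With this change a chunk carries two lazily allocated remembered sets: old_to_new_slots_ for pointers into the young generation and old_to_old_slots_ for pointers into other old-space pages that compaction may move. A hypothetical sketch (not V8's actual recording path) of how a caller might pick the right set against the declarations above; the SlotSet insertion call itself is elided since its API is not shown in this hunk:

// Hypothetical helper: lazily allocate and select the matching slot set.
void RecordSlotFor(MemoryChunk* chunk, Address slot,
                   bool points_to_new_space) {
  if (points_to_new_space) {
    if (chunk->old_to_new_slots() == nullptr) chunk->AllocateOldToNewSlots();
    // ... insert `slot` into chunk->old_to_new_slots() ...
  } else {
    if (chunk->old_to_old_slots() == nullptr) chunk->AllocateOldToOldSlots();
    // ... insert `slot` into chunk->old_to_old_slots() ...
  }
}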
521 Address area_start() { return area_start_; } | 525 Address area_start() { return area_start_; } |
522 Address area_end() { return area_end_; } | 526 Address area_end() { return area_end_; } |
523 int area_size() { return static_cast<int>(area_end() - area_start()); } | 527 int area_size() { return static_cast<int>(area_end() - area_start()); } |
524 | 528 |
525 bool CommitArea(size_t requested); | 529 bool CommitArea(size_t requested); |
526 | 530 |
527 // Approximate amount of physical memory committed for this chunk. | 531 // Approximate amount of physical memory committed for this chunk. |
528 size_t CommittedPhysicalMemory() { return high_water_mark_.Value(); } | 532 size_t CommittedPhysicalMemory() { return high_water_mark_.Value(); } |
529 | 533 |
(...skipping 147 matching lines...) |
677 | 681 |
678 // Count of bytes marked black on page. | 682 // Count of bytes marked black on page. |
679 int live_byte_count_; | 683 int live_byte_count_; |
680 | 684 |
681 SlotsBuffer* slots_buffer_; | 685 SlotsBuffer* slots_buffer_; |
682 | 686 |
683 // A single slot set for small pages (of size kPageSize) or an array of | 687 // A single slot set for small pages (of size kPageSize) or an array of |
684 // slot sets for large pages. In the latter case the number of entries in | 688 // slot sets for large pages. In the latter case the number of entries in |
685 // the array is ceil(size() / kPageSize). | 689 // the array is ceil(size() / kPageSize). |
686 SlotSet* old_to_new_slots_; | 690 SlotSet* old_to_new_slots_; |
| 691 SlotSet* old_to_old_slots_; |
687 | 692 |
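The ceil(size() / kPageSize) rule above gives a large page one slot set per kPageSize-sized bucket it spans. A worked example, with a 1 MB kPageSize assumed for illustration:

#include <cstddef>

constexpr size_t kPageSize = 1u << 20;  // assumed 1 MB for illustration

constexpr size_t SlotSetCount(size_t chunk_size) {
  return (chunk_size + kPageSize - 1) / kPageSize;  // ceil division
}

static_assert(SlotSetCount(kPageSize) == 1, "regular page: a single slot set");
static_assert(SlotSetCount(3 * kPageSize + 512) == 4,
              "a large object just over 3 MB spans four buckets");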
688 SkipList* skip_list_; | 693 SkipList* skip_list_; |
689 | 694 |
690 intptr_t write_barrier_counter_; | 695 intptr_t write_barrier_counter_; |
691 | 696 |
692 // Assuming the initial allocation on a page is sequential, this | 697 // Assuming the initial allocation on a page is sequential, this |
693 // counts the highest number of bytes ever allocated on the page. | 698 // counts the highest number of bytes ever allocated on the page. |
694 AtomicValue<intptr_t> high_water_mark_; | 699 AtomicValue<intptr_t> high_water_mark_; |
695 | 700 |
696 base::Mutex* mutex_; | 701 base::Mutex* mutex_; |
(...skipping 33 matching lines...) |
730 class Page : public MemoryChunk { | 735 class Page : public MemoryChunk { |
731 public: | 736 public: |
732 // Returns the page containing a given address. The address ranges | 737 // Returns the page containing a given address. The address ranges |
733 // from [page_addr .. page_addr + kPageSize[. | 738 // from [page_addr .. page_addr + kPageSize[. |
734 // This only works if the object is in fact in a page. See also MemoryChunk:: | 739 // This only works if the object is in fact in a page. See also MemoryChunk:: |
735 // FromAddress() and FromAnyPointerAddress(). | 740 // FromAddress() and FromAnyPointerAddress(). |
736 INLINE(static Page* FromAddress(Address a)) { | 741 INLINE(static Page* FromAddress(Address a)) { |
737 return reinterpret_cast<Page*>(OffsetFrom(a) & ~kPageAlignmentMask); | 742 return reinterpret_cast<Page*>(OffsetFrom(a) & ~kPageAlignmentMask); |
738 } | 743 } |
739 | 744 |
| 745 // Only works for addresses in pointer spaces, not code space. |
| 746 inline static Page* FromAnyPointerAddress(Heap* heap, Address addr); |
| 747 |
740 // Returns the page containing an allocation top. Because an allocation | 748 // Returns the page containing an allocation top. Because an allocation |
741 // top address can be the upper bound of the page, we need to subtract | 749 // top address can be the upper bound of the page, we need to subtract |
742 // kPointerSize from it first. The address ranges from | 750 // kPointerSize from it first. The address ranges from |
743 // [page_addr + kObjectStartOffset .. page_addr + kPageSize]. | 751 // [page_addr + kObjectStartOffset .. page_addr + kPageSize]. |
744 INLINE(static Page* FromAllocationTop(Address top)) { | 752 INLINE(static Page* FromAllocationTop(Address top)) { |
745 Page* p = FromAddress(top - kPointerSize); | 753 Page* p = FromAddress(top - kPointerSize); |
746 return p; | 754 return p; |
747 } | 755 } |
748 | 756 |
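FromAllocationTop backs top up by one word before masking because a completely full page's top equals its end address, which is numerically the first byte of the next aligned region. A sketch with assumed sizes:

#include <cassert>
#include <cstdint>

int main() {
  const uintptr_t kPageSize = 1u << 20;      // assumed 1 MB pages
  const uintptr_t kPageAlignmentMask = kPageSize - 1;
  const uintptr_t kPointerSize = 8;          // assumed 64-bit target

  uintptr_t page_base = 7 * kPageSize;
  uintptr_t top = page_base + kPageSize;     // allocation area is full

  // Masking `top` directly lands on the next page...
  assert((top & ~kPageAlignmentMask) == page_base + kPageSize);
  // ...so back up one word first to recover the page that owns `top`.
  assert(((top - kPointerSize) & ~kPageAlignmentMask) == page_base);
  return 0;
}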
749 // Returns the next page in the chain of pages owned by a space. | 757 // Returns the next page in the chain of pages owned by a space. |
(...skipping 2268 matching lines...) |
3018 count = 0; | 3026 count = 0; |
3019 } | 3027 } |
3020 // Must be small, since lookup is done by linear iteration. | 3028 // Must be small, since lookup is done by linear iteration. |
3021 static const int kMaxComments = 64; | 3029 static const int kMaxComments = 64; |
3022 }; | 3030 }; |
3023 #endif | 3031 #endif |
3024 } // namespace internal | 3032 } // namespace internal |
3025 } // namespace v8 | 3033 } // namespace v8 |
3026 | 3034 |
3027 #endif // V8_HEAP_SPACES_H_ | 3035 #endif // V8_HEAP_SPACES_H_ |