| OLD | NEW |
| 1 // Copyright 2011 the V8 project authors. All rights reserved. | 1 // Copyright 2011 the V8 project authors. All rights reserved. |
| 2 // Redistribution and use in source and binary forms, with or without | 2 // Redistribution and use in source and binary forms, with or without |
| 3 // modification, are permitted provided that the following conditions are | 3 // modification, are permitted provided that the following conditions are |
| 4 // met: | 4 // met: |
| 5 // | 5 // |
| 6 // * Redistributions of source code must retain the above copyright | 6 // * Redistributions of source code must retain the above copyright |
| 7 // notice, this list of conditions and the following disclaimer. | 7 // notice, this list of conditions and the following disclaimer. |
| 8 // * Redistributions in binary form must reproduce the above | 8 // * Redistributions in binary form must reproduce the above |
| 9 // copyright notice, this list of conditions and the following | 9 // copyright notice, this list of conditions and the following |
| 10 // disclaimer in the documentation and/or other materials provided | 10 // disclaimer in the documentation and/or other materials provided |
| (...skipping 295 matching lines...) |
| 306 return reinterpret_cast<MemoryChunk*>(OffsetFrom(a) & ~kAlignmentMask); | 306 return reinterpret_cast<MemoryChunk*>(OffsetFrom(a) & ~kAlignmentMask); |
| 307 } | 307 } |
| 308 | 308 |
| 309 // Only works for addresses in pointer spaces, not data or code spaces. | 309 // Only works for addresses in pointer spaces, not data or code spaces. |
| 310 static inline MemoryChunk* FromAnyPointerAddress(Heap* heap, Address addr); | 310 static inline MemoryChunk* FromAnyPointerAddress(Heap* heap, Address addr); |
| 311 | 311 |
| 312 Address address() { return reinterpret_cast<Address>(this); } | 312 Address address() { return reinterpret_cast<Address>(this); } |
| 313 | 313 |
| 314 bool is_valid() { return address() != NULL; } | 314 bool is_valid() { return address() != NULL; } |
| 315 | 315 |
| 316 MemoryChunk* next_chunk() const { return next_chunk_; } | 316 MemoryChunk* next_chunk() const { |
| 317 MemoryChunk* prev_chunk() const { return prev_chunk_; } | 317 return reinterpret_cast<MemoryChunk*>(Acquire_Load(&next_chunk_)); |
| 318 } |
| 318 | 319 |
| 319 void set_next_chunk(MemoryChunk* next) { next_chunk_ = next; } | 320 MemoryChunk* prev_chunk() const { |
| 320 void set_prev_chunk(MemoryChunk* prev) { prev_chunk_ = prev; } | 321 return reinterpret_cast<MemoryChunk*>(Acquire_Load(&prev_chunk_)); |
| 322 } |
| 323 |
| 324 void set_next_chunk(MemoryChunk* next) { |
| 325 Release_Store(&next_chunk_, reinterpret_cast<AtomicWord>(next)); |
| 326 } |
| 327 |
| 328 void set_prev_chunk(MemoryChunk* prev) { |
| 329 Release_Store(&prev_chunk_, reinterpret_cast<AtomicWord>(prev)); |
| 330 } |
| 321 | 331 |
| 322 Space* owner() const { | 332 Space* owner() const { |
| 323 if ((reinterpret_cast<intptr_t>(owner_) & kFailureTagMask) == | 333 if ((reinterpret_cast<intptr_t>(owner_) & kFailureTagMask) == |
| 324 kFailureTag) { | 334 kFailureTag) { |
| 325 return reinterpret_cast<Space*>(reinterpret_cast<intptr_t>(owner_) - | 335 return reinterpret_cast<Space*>(reinterpret_cast<intptr_t>(owner_) - |
| 326 kFailureTag); | 336 kFailureTag); |
| 327 } else { | 337 } else { |
| 328 return NULL; | 338 return NULL; |
| 329 } | 339 } |
| 330 } | 340 } |
| (...skipping 198 matching lines...) |
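Reviewer note on the hunk above: the new accessors (new lines 316-330) replace plain pointer reads/writes with Acquire_Load/Release_Store on an AtomicWord, so a thread that observes a chunk pointer also observes everything the publishing thread wrote before storing it. A minimal standalone sketch of that acquire/release pairing follows; it uses std::atomic as a stand-in for V8's AtomicWord primitives (the std::atomic spelling and the Chunk type are assumptions for illustration, not the code under review).

```cpp
#include <atomic>
#include <cstdint>

struct Chunk {
  // The pointer is kept as an integral word so it can be loaded and stored
  // atomically, mirroring the AtomicWord field in the patch.
  std::atomic<std::intptr_t> next_chunk_{0};

  Chunk* next_chunk() const {
    // Acquire load: everything the writer did before the matching release
    // store is visible to this thread after the load.
    return reinterpret_cast<Chunk*>(
        next_chunk_.load(std::memory_order_acquire));
  }

  void set_next_chunk(Chunk* next) {
    // Release store: publishes the pointer and all prior initialization of
    // *next to any thread that later acquire-loads it.
    next_chunk_.store(reinterpret_cast<std::intptr_t>(next),
                      std::memory_order_release);
  }
};

int main() {
  Chunk a, b;
  a.set_next_chunk(&b);                  // release-store publishes &b
  return a.next_chunk() == &b ? 0 : 1;   // acquire-load observes it
}
```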
| 529 MemoryChunk::FromAddress(address)->IncrementLiveBytes(by); | 539 MemoryChunk::FromAddress(address)->IncrementLiveBytes(by); |
| 530 } | 540 } |
| 531 | 541 |
| 532 static void IncrementLiveBytesFromMutator(Address address, int by); | 542 static void IncrementLiveBytesFromMutator(Address address, int by); |
| 533 | 543 |
| 534 static const intptr_t kAlignment = | 544 static const intptr_t kAlignment = |
| 535 (static_cast<uintptr_t>(1) << kPageSizeBits); | 545 (static_cast<uintptr_t>(1) << kPageSizeBits); |
| 536 | 546 |
| 537 static const intptr_t kAlignmentMask = kAlignment - 1; | 547 static const intptr_t kAlignmentMask = kAlignment - 1; |
| 538 | 548 |
| 539 static const intptr_t kSizeOffset = kPointerSize + kPointerSize; | 549 static const intptr_t kSizeOffset = 0; |
| 540 | 550 |
| 541 static const intptr_t kLiveBytesOffset = | 551 static const intptr_t kLiveBytesOffset = |
| 542 kSizeOffset + kPointerSize + kPointerSize + kPointerSize + | 552 kSizeOffset + kPointerSize + kPointerSize + kPointerSize + |
| 543 kPointerSize + kPointerSize + | 553 kPointerSize + kPointerSize + |
| 544 kPointerSize + kPointerSize + kPointerSize + kIntSize; | 554 kPointerSize + kPointerSize + kPointerSize + kIntSize; |
| 545 | 555 |
| 546 static const size_t kSlotsBufferOffset = kLiveBytesOffset + kIntSize; | 556 static const size_t kSlotsBufferOffset = kLiveBytesOffset + kIntSize; |
| 547 | 557 |
| 548 static const size_t kWriteBarrierCounterOffset = | 558 static const size_t kWriteBarrierCounterOffset = |
| 549 kSlotsBufferOffset + kPointerSize + kPointerSize; | 559 kSlotsBufferOffset + kPointerSize + kPointerSize; |
| 550 | 560 |
| 551 static const size_t kHeaderSize = kWriteBarrierCounterOffset + kPointerSize + | 561 static const size_t kHeaderSize = kWriteBarrierCounterOffset + kPointerSize + |
| 552 kIntSize + kIntSize + kPointerSize + | 562 kIntSize + kIntSize + kPointerSize + |
| 553 5 * kPointerSize; | 563 5 * kPointerSize + |
| 564 kPointerSize + kPointerSize; |
| 554 | 565 |
| 555 static const int kBodyOffset = | 566 static const int kBodyOffset = |
| 556 CODE_POINTER_ALIGN(kHeaderSize + Bitmap::kSize); | 567 CODE_POINTER_ALIGN(kHeaderSize + Bitmap::kSize); |
| 557 | 568 |
| 558 // The start offset of the object area in a page. Aligned to both maps and | 569 // The start offset of the object area in a page. Aligned to both maps and |
| 559 // code alignment to be suitable for both. Also aligned to 32 words because | 570 // code alignment to be suitable for both. Also aligned to 32 words because |
| 560 // the marking bitmap is arranged in 32 bit chunks. | 571 // the marking bitmap is arranged in 32 bit chunks. |
| 561 static const int kObjectStartAlignment = 32 * kPointerSize; | 572 static const int kObjectStartAlignment = 32 * kPointerSize; |
| 562 static const int kObjectStartOffset = kBodyOffset - 1 + | 573 static const int kObjectStartOffset = kBodyOffset - 1 + |
| 563 (kObjectStartAlignment - (kBodyOffset - 1) % kObjectStartAlignment); | 574 (kObjectStartAlignment - (kBodyOffset - 1) % kObjectStartAlignment); |
| (...skipping 51 matching lines...) |
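Reviewer note on the offset changes above: kSizeOffset drops from kPointerSize + kPointerSize to 0 and kFlagsOffset (new line 636) from kPointerSize * 3 to kPointerSize because next_chunk_ and prev_chunk_ no longer lead the object; they become private AtomicWord members at the tail (new lines 731-735), which is also why kHeaderSize grows by two pointer sizes. A minimal sketch of that reasoning, using hypothetical simplified structs (not the real MemoryChunk) and assuming a platform where size_t, intptr_t and pointers share one width:

```cpp
#include <cstddef>
#include <cstdint>

struct OldLayout {
  void* next_chunk_;       // previously the first two fields...
  void* prev_chunk_;
  std::size_t size_;       // ...so size_ sat at 2 * kPointerSize
  std::intptr_t flags_;    // ...and flags_ at 3 * kPointerSize
};

struct NewLayout {
  std::size_t size_;       // now first: kSizeOffset == 0
  std::intptr_t flags_;    // now second: kFlagsOffset == kPointerSize
  // ... rest of the header fields ...
  std::intptr_t next_chunk_;  // AtomicWords moved to the tail of the header,
  std::intptr_t prev_chunk_;  // which is why kHeaderSize grows by 2 pointers
};

static_assert(offsetof(OldLayout, size_) == 2 * sizeof(void*),
              "old kSizeOffset");
static_assert(offsetof(NewLayout, size_) == 0, "new kSizeOffset");
static_assert(offsetof(NewLayout, flags_) == sizeof(void*),
              "new kFlagsOffset");

int main() { return 0; }
```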
| 615 | 626 |
| 616 inline Address MarkbitIndexToAddress(uint32_t index) { | 627 inline Address MarkbitIndexToAddress(uint32_t index) { |
| 617 return this->address() + (index << kPointerSizeLog2); | 628 return this->address() + (index << kPointerSizeLog2); |
| 618 } | 629 } |
| 619 | 630 |
| 620 void InsertAfter(MemoryChunk* other); | 631 void InsertAfter(MemoryChunk* other); |
| 621 void Unlink(); | 632 void Unlink(); |
| 622 | 633 |
| 623 inline Heap* heap() { return heap_; } | 634 inline Heap* heap() { return heap_; } |
| 624 | 635 |
| 625 static const int kFlagsOffset = kPointerSize * 3; | 636 static const int kFlagsOffset = kPointerSize; |
| 626 | 637 |
| 627 bool IsEvacuationCandidate() { return IsFlagSet(EVACUATION_CANDIDATE); } | 638 bool IsEvacuationCandidate() { return IsFlagSet(EVACUATION_CANDIDATE); } |
| 628 | 639 |
| 629 bool ShouldSkipEvacuationSlotRecording() { | 640 bool ShouldSkipEvacuationSlotRecording() { |
| 630 return (flags_ & kSkipEvacuationSlotsRecordingMask) != 0; | 641 return (flags_ & kSkipEvacuationSlotsRecordingMask) != 0; |
| 631 } | 642 } |
| 632 | 643 |
| 633 inline SkipList* skip_list() { | 644 inline SkipList* skip_list() { |
| 634 return skip_list_; | 645 return skip_list_; |
| 635 } | 646 } |
| (...skipping 28 matching lines...) |
| 664 bool CommitArea(size_t requested); | 675 bool CommitArea(size_t requested); |
| 665 | 676 |
| 666 // Approximate amount of physical memory committed for this chunk. | 677 // Approximate amount of physical memory committed for this chunk. |
| 667 size_t CommittedPhysicalMemory() { | 678 size_t CommittedPhysicalMemory() { |
| 668 return high_water_mark_; | 679 return high_water_mark_; |
| 669 } | 680 } |
| 670 | 681 |
| 671 static inline void UpdateHighWaterMark(Address mark); | 682 static inline void UpdateHighWaterMark(Address mark); |
| 672 | 683 |
| 673 protected: | 684 protected: |
| 674 MemoryChunk* next_chunk_; | |
| 675 MemoryChunk* prev_chunk_; | |
| 676 size_t size_; | 685 size_t size_; |
| 677 intptr_t flags_; | 686 intptr_t flags_; |
| 678 | 687 |
| 679 // Start and end of allocatable memory on this chunk. | 688 // Start and end of allocatable memory on this chunk. |
| 680 Address area_start_; | 689 Address area_start_; |
| 681 Address area_end_; | 690 Address area_end_; |
| 682 | 691 |
| 683 // If the chunk needs to remember its memory reservation, it is stored here. | 692 // If the chunk needs to remember its memory reservation, it is stored here. |
| 684 VirtualMemory reservation_; | 693 VirtualMemory reservation_; |
| 685 // The identity of the owning space. This is tagged as a failure pointer, but | 694 // The identity of the owning space. This is tagged as a failure pointer, but |
| (...skipping 26 matching lines...) Expand all Loading... |
| 712 intptr_t non_available_small_blocks_; | 721 intptr_t non_available_small_blocks_; |
| 713 | 722 |
| 714 static MemoryChunk* Initialize(Heap* heap, | 723 static MemoryChunk* Initialize(Heap* heap, |
| 715 Address base, | 724 Address base, |
| 716 size_t size, | 725 size_t size, |
| 717 Address area_start, | 726 Address area_start, |
| 718 Address area_end, | 727 Address area_end, |
| 719 Executability executable, | 728 Executability executable, |
| 720 Space* owner); | 729 Space* owner); |
| 721 | 730 |
| 731 private: |
| 732 // next_chunk_ holds a pointer of type MemoryChunk |
| 733 AtomicWord next_chunk_; |
| 734 // prev_chunk_ holds a pointer of type MemoryChunk |
| 735 AtomicWord prev_chunk_; |
| 736 |
| 722 friend class MemoryAllocator; | 737 friend class MemoryAllocator; |
| 723 }; | 738 }; |
| 724 | 739 |
| 725 | 740 |
| 726 STATIC_CHECK(sizeof(MemoryChunk) <= MemoryChunk::kHeaderSize); | 741 STATIC_CHECK(sizeof(MemoryChunk) <= MemoryChunk::kHeaderSize); |
| 727 | 742 |
| 728 | 743 |
| 729 // ----------------------------------------------------------------------------- | 744 // ----------------------------------------------------------------------------- |
| 730 // A page is a memory chunk of a size 1MB. Large object pages may be larger. | 745 // A page is a memory chunk of a size 1MB. Large object pages may be larger. |
| 731 // | 746 // |
| (...skipping 2210 matching lines...) |
| 2942 } | 2957 } |
| 2943 // Must be small, since an iteration is used for lookup. | 2958 // Must be small, since an iteration is used for lookup. |
| 2944 static const int kMaxComments = 64; | 2959 static const int kMaxComments = 64; |
| 2945 }; | 2960 }; |
| 2946 #endif | 2961 #endif |
| 2947 | 2962 |
| 2948 | 2963 |
| 2949 } } // namespace v8::internal | 2964 } } // namespace v8::internal |
| 2950 | 2965 |
| 2951 #endif // V8_SPACES_H_ | 2966 #endif // V8_SPACES_H_ |